lib/scatterlist.c (v3.5.6)
  1/*
  2 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
  3 *
  4 * Scatterlist handling helpers.
  5 *
  6 * This source code is licensed under the GNU General Public License,
  7 * Version 2. See the file COPYING for more details.
  8 */
  9#include <linux/export.h>
 10#include <linux/slab.h>
 11#include <linux/scatterlist.h>
 12#include <linux/highmem.h>
 13#include <linux/kmemleak.h>
 14
 15/**
 16 * sg_next - return the next scatterlist entry in a list
 17 * @sg:		The current sg entry
 18 *
 19 * Description:
 20 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 21 *   of a chained scatterlist, it could jump to the start of a new
 22 *   scatterlist array.
 23 *
 24 **/
 25struct scatterlist *sg_next(struct scatterlist *sg)
 26{
 27#ifdef CONFIG_DEBUG_SG
 28	BUG_ON(sg->sg_magic != SG_MAGIC);
 29#endif
 30	if (sg_is_last(sg))
 31		return NULL;
 32
 33	sg++;
 34	if (unlikely(sg_is_chain(sg)))
 35		sg = sg_chain_ptr(sg);
 36
 37	return sg;
 38}
 39EXPORT_SYMBOL(sg_next);
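A usage sketch (hypothetical helper, not part of the file): because sg_next() transparently follows chain entries, the same loop walks chained and unchained lists alike, here to total the bytes a list describes.

#include <linux/scatterlist.h>

static unsigned int sg_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	/* sg_next() returns NULL after the entry marked with sg_mark_end() */
	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}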
 40
 41/**
 42 * sg_last - return the last scatterlist entry in a list
 43 * @sgl:	First entry in the scatterlist
 44 * @nents:	Number of entries in the scatterlist
 45 *
 46 * Description:
 47 *   Should only be used casually; it (currently) scans the entire list
 48 *   to get the last entry.
 49 *
 50 *   Note that the @sgl@ pointer passed in need not be the first one,
 51 *   the important bit is that @nents@ denotes the number of entries that
 52 *   exist from @sgl@.
 53 *
 54 **/
 55struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 56{
 57#ifndef ARCH_HAS_SG_CHAIN
 58	struct scatterlist *ret = &sgl[nents - 1];
 59#else
 60	struct scatterlist *sg, *ret = NULL;
 61	unsigned int i;
 62
 63	for_each_sg(sgl, sg, nents, i)
 64		ret = sg;
 65
 66#endif
 67#ifdef CONFIG_DEBUG_SG
 68	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 69	BUG_ON(!sg_is_last(ret));
 70#endif
 71	return ret;
 72}
 73EXPORT_SYMBOL(sg_last);
 74
 75/**
 76 * sg_init_table - Initialize SG table
 77 * @sgl:	   The SG table
 78 * @nents:	   Number of entries in table
 79 *
 80 * Notes:
 81 *   If this is part of a chained sg table, sg_mark_end() should be
 82 *   used only on the last table part.
 83 *
 84 **/
 85void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 86{
 87	memset(sgl, 0, sizeof(*sgl) * nents);
 88#ifdef CONFIG_DEBUG_SG
 89	{
 90		unsigned int i;
 91		for (i = 0; i < nents; i++)
 92			sgl[i].sg_magic = SG_MAGIC;
 93	}
 94#endif
 95	sg_mark_end(&sgl[nents - 1]);
 96}
 97EXPORT_SYMBOL(sg_init_table);
 98
 99/**
100 * sg_init_one - Initialize a single entry sg list
101 * @sg:		 SG entry
102 * @buf:	 Virtual address for IO
103 * @buflen:	 IO length
104 *
105 **/
106void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
107{
108	sg_init_table(sg, 1);
109	sg_set_buf(sg, buf, buflen);
110}
111EXPORT_SYMBOL(sg_init_one);
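A sketch of the common single-buffer case (helper name, buffer and size are illustrative): sg_init_one() stands in for the sg_init_table() plus sg_set_buf() pair when exactly one buffer has to be described. The buffer must be addressable by sg_set_buf(), e.g. kmalloc'ed, not on the stack.

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int setup_one_seg(struct scatterlist *sg, void **bufp)
{
	void *buf = kmalloc(512, GFP_KERNEL);	/* size is arbitrary here */

	if (!buf)
		return -ENOMEM;
	sg_init_one(sg, buf, 512);	/* one entry, end marker set */
	*bufp = buf;			/* caller kfree()s when done */
	return 0;
}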
112
113/*
114 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
115 * helpers.
116 */
117static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
118{
119	if (nents == SG_MAX_SINGLE_ALLOC) {
120		/*
121		 * Kmemleak doesn't track page allocations as they are not
122		 * commonly used (in a raw form) for kernel data structures.
123		 * As we chain together a list of pages and then a normal
124 * kmalloc (tracked by kmemleak), in order for that last
125		 * allocation not to become decoupled (and thus a
126		 * false-positive) we need to inform kmemleak of all the
127		 * intermediate allocations.
128		 */
129		void *ptr = (void *) __get_free_page(gfp_mask);
130		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
131		return ptr;
132	} else
133		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
134}
135
136static void sg_kfree(struct scatterlist *sg, unsigned int nents)
137{
138	if (nents == SG_MAX_SINGLE_ALLOC) {
139		kmemleak_free(sg);
140		free_page((unsigned long) sg);
141	} else
142		kfree(sg);
143}
144
145/**
146 * __sg_free_table - Free a previously mapped sg table
147 * @table:	The sg table header to use
148 * @max_ents:	The maximum number of entries per single scatterlist
149 * @free_fn:	Free function
150 *
151 *  Description:
152 *    Free an sg table previously allocated and setup with
153 *    __sg_alloc_table().  The @max_ents value must be identical to
154 *    that previously used with __sg_alloc_table().
155 *
156 **/
157void __sg_free_table(struct sg_table *table, unsigned int max_ents,
158		     sg_free_fn *free_fn)
159{
160	struct scatterlist *sgl, *next;
161
162	if (unlikely(!table->sgl))
163		return;
164
165	sgl = table->sgl;
166	while (table->orig_nents) {
167		unsigned int alloc_size = table->orig_nents;
168		unsigned int sg_size;
169
170		/*
171		 * If we have more than max_ents segments left,
172		 * then assign 'next' to the sg table after the current one.
173		 * sg_size is then one less than alloc size, since the last
174		 * element is the chain pointer.
175		 */
176		if (alloc_size > max_ents) {
177			next = sg_chain_ptr(&sgl[max_ents - 1]);
178			alloc_size = max_ents;
179			sg_size = alloc_size - 1;
180		} else {
181			sg_size = alloc_size;
182			next = NULL;
183		}
184
185		table->orig_nents -= sg_size;
186		free_fn(sgl, alloc_size);
187		sgl = next;
188	}
189
190	table->sgl = NULL;
191}
192EXPORT_SYMBOL(__sg_free_table);
193
194/**
195 * sg_free_table - Free a previously allocated sg table
196 * @table:	The mapped sg table header
197 *
198 **/
199void sg_free_table(struct sg_table *table)
200{
201	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
202}
203EXPORT_SYMBOL(sg_free_table);
204
205/**
206 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
207 * @table:	The sg table header to use
208 * @nents:	Number of entries in sg list
209 * @max_ents:	The maximum number of entries the allocator returns per call
210 * @gfp_mask:	GFP allocation mask
211 * @alloc_fn:	Allocator to use
212 *
213 * Description:
214 *   This function sets up a @table @nents entries long. The allocator is
215 *   defined to return scatterlist chunks of maximum size @max_ents.
216 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
217 *   chained in units of @max_ents.
218 *
219 * Notes:
220 *   If this function returns non-0 (e.g. failure), the caller must call
221 *   __sg_free_table() to clean up any leftover allocations.
222 *
223 **/
224int __sg_alloc_table(struct sg_table *table, unsigned int nents,
225		     unsigned int max_ents, gfp_t gfp_mask,
226		     sg_alloc_fn *alloc_fn)
227{
228	struct scatterlist *sg, *prv;
229	unsigned int left;
230
231#ifndef ARCH_HAS_SG_CHAIN
232	BUG_ON(nents > max_ents);
233#endif
234
235	memset(table, 0, sizeof(*table));
236
237	left = nents;
238	prv = NULL;
239	do {
240		unsigned int sg_size, alloc_size = left;
241
242		if (alloc_size > max_ents) {
243			alloc_size = max_ents;
244			sg_size = alloc_size - 1;
245		} else
246			sg_size = alloc_size;
247
248		left -= sg_size;
249
250		sg = alloc_fn(alloc_size, gfp_mask);
251		if (unlikely(!sg)) {
252			/*
253			 * Adjust entry count to reflect that the last
254			 * entry of the previous table won't be used for
255			 * linkage.  Without this, sg_kfree() may get
256			 * confused.
257			 */
258			if (prv)
259				table->nents = ++table->orig_nents;
260
261 			return -ENOMEM;
262		}
263
264		sg_init_table(sg, alloc_size);
265		table->nents = table->orig_nents += sg_size;
266
267		/*
268		 * If this is the first mapping, assign the sg table header.
269		 * If this is not the first mapping, chain previous part.
270		 */
271		if (prv)
272			sg_chain(prv, max_ents, sg);
273		else
274			table->sgl = sg;
275
276		/*
277		 * If no more entries after this one, mark the end
278		 */
279		if (!left)
280			sg_mark_end(&sg[sg_size - 1]);
281
282		/*
283		 * only really needed for mempool backed sg allocations (like
284		 * SCSI), a possible improvement here would be to pass the
285		 * table pointer into the allocator and let that clear these
286		 * flags
287		 */
288		gfp_mask &= ~__GFP_WAIT;
289		gfp_mask |= __GFP_HIGH;
290		prv = sg;
291	} while (left);
292
293	return 0;
294}
295EXPORT_SYMBOL(__sg_alloc_table);
296
297/**
298 * sg_alloc_table - Allocate and initialize an sg table
299 * @table:	The sg table header to use
300 * @nents:	Number of entries in sg list
301 * @gfp_mask:	GFP allocation mask
302 *
303 *  Description:
304 *    Allocate and initialize an sg table. If @nents@ is larger than
305 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
306 *
307 **/
308int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
309{
310	int ret;
311
312	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
313			       gfp_mask, sg_kmalloc);
314	if (unlikely(ret))
315		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
316
317	return ret;
318}
319EXPORT_SYMBOL(sg_alloc_table);
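A sketch of the usual allocate/fill/free cycle (hypothetical helper; the page array is assumed to come from the caller):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int describe_pages(struct sg_table *st, struct page **pages,
			  unsigned int n_pages)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	/* chains transparently when n_pages exceeds SG_MAX_SINGLE_ALLOC */
	ret = sg_alloc_table(st, n_pages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(st->sgl, sg, st->orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	return 0;	/* undone later with sg_free_table(st) */
}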
320
321/**
322 * sg_miter_start - start mapping iteration over a sg list
323 * @miter: sg mapping iter to be started
324 * @sgl: sg list to iterate over
325 * @nents: number of sg entries
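 * @flags: sg iteration flags (SG_MITER_ATOMIC, SG_MITER_TO_SG, SG_MITER_FROM_SG)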
326 *
327 * Description:
328 *   Starts mapping iterator @miter.
329 *
330 * Context:
331 *   Don't care.
332 */
333void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
334		    unsigned int nents, unsigned int flags)
335{
336	memset(miter, 0, sizeof(struct sg_mapping_iter));
337
338	miter->__sg = sgl;
339	miter->__nents = nents;
340	miter->__offset = 0;
341	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
342	miter->__flags = flags;
343}
344EXPORT_SYMBOL(sg_miter_start);
345
346/**
347 * sg_miter_next - proceed mapping iterator to the next mapping
348 * @miter: sg mapping iter to proceed
349 *
350 * Description:
351 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
352 *   started using sg_miter_start().  On successful return,
353 *   @miter@->page, @miter@->addr and @miter@->length point to the
354 *   current mapping.
355 *
356 * Context:
357 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
358 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
359 *
360 * Returns:
361 *   true if @miter contains the next mapping.  false if end of sg
362 *   list is reached.
363 */
364bool sg_miter_next(struct sg_mapping_iter *miter)
365{
366	unsigned int off, len;
367
368	/* check for end and drop resources from the last iteration */
369	if (!miter->__nents)
370		return false;
371
372	sg_miter_stop(miter);
373
374	/* get to the next sg if necessary.  __offset is adjusted by stop */
375	while (miter->__offset == miter->__sg->length) {
376		if (--miter->__nents) {
377			miter->__sg = sg_next(miter->__sg);
378			miter->__offset = 0;
379		} else
380			return false;
381	}
382
383	/* map the next page */
384	off = miter->__sg->offset + miter->__offset;
385	len = miter->__sg->length - miter->__offset;
386
387	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
388	off &= ~PAGE_MASK;
389	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
390	miter->consumed = miter->length;
391
392	if (miter->__flags & SG_MITER_ATOMIC)
393		miter->addr = kmap_atomic(miter->page) + off;
394	else
395		miter->addr = kmap(miter->page) + off;
396
397	return true;
398}
399EXPORT_SYMBOL(sg_miter_next);
400
401/**
402 * sg_miter_stop - stop mapping iteration
403 * @miter: sg mapping iter to be stopped
404 *
405 * Description:
406 *   Stops mapping iterator @miter.  @miter should have been started
407 *   using sg_miter_start().  A stopped iteration can be
408 *   resumed by calling sg_miter_next() on it.  This is useful when
409 *   resources (kmap) need to be released during iteration.
410 *
411 * Context:
412 *   IRQ disabled if SG_MITER_ATOMIC is set.  Don't care otherwise.
413 */
414void sg_miter_stop(struct sg_mapping_iter *miter)
415{
416	WARN_ON(miter->consumed > miter->length);
417
418	/* drop resources from the last iteration */
419	if (miter->addr) {
420		miter->__offset += miter->consumed;
421
422		if (miter->__flags & SG_MITER_TO_SG)
423			flush_kernel_dcache_page(miter->page);
424
425		if (miter->__flags & SG_MITER_ATOMIC) {
426			WARN_ON(!irqs_disabled());
427			kunmap_atomic(miter->addr);
428		} else
429			kunmap(miter->page);
430
431		miter->page = NULL;
432		miter->addr = NULL;
433		miter->length = 0;
434		miter->consumed = 0;
435	}
436}
437EXPORT_SYMBOL(sg_miter_stop);
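A sketch of the canonical start/next/stop pattern the three sg_miter_* helpers form (hypothetical helper): zero every byte a list describes, one mapped page fragment at a time. Without SG_MITER_ATOMIC the iterator uses kmap() and may sleep, so this variant is for process context.

#include <linux/scatterlist.h>
#include <linux/string.h>

static void sg_zero_all(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);	/* flushes and unmaps the last page */
}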
438
439/**
440 * sg_copy_buffer - Copy data between a linear buffer and an SG list
441 * @sgl:		 The SG list
442 * @nents:		 Number of SG entries
443 * @buf:		 Where to copy from
444 * @buflen:		 The number of bytes to copy
445 * @to_buffer: 		 transfer direction (non-zero == from an sg list to a
446 * 			 buffer, 0 == from a buffer to an sg list)
447 *
448 * Returns the number of copied bytes.
449 *
450 **/
451static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
452			     void *buf, size_t buflen, int to_buffer)
453{
454	unsigned int offset = 0;
455	struct sg_mapping_iter miter;
456	unsigned long flags;
457	unsigned int sg_flags = SG_MITER_ATOMIC;
458
459	if (to_buffer)
460		sg_flags |= SG_MITER_FROM_SG;
461	else
462		sg_flags |= SG_MITER_TO_SG;
463
464	sg_miter_start(&miter, sgl, nents, sg_flags);
465
466	local_irq_save(flags);
467
468	while (sg_miter_next(&miter) && offset < buflen) {
469		unsigned int len;
470
471		len = min(miter.length, buflen - offset);
472
473		if (to_buffer)
474			memcpy(buf + offset, miter.addr, len);
475		else
476			memcpy(miter.addr, buf + offset, len);
477
478		offset += len;
479	}
480
481	sg_miter_stop(&miter);
482
483	local_irq_restore(flags);
484	return offset;
485}
486
487/**
488 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
489 * @sgl:		 The SG list
490 * @nents:		 Number of SG entries
491 * @buf:		 Where to copy from
492 * @buflen:		 The number of bytes to copy
493 *
494 * Returns the number of copied bytes.
495 *
496 **/
497size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
498			   void *buf, size_t buflen)
499{
500	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
501}
502EXPORT_SYMBOL(sg_copy_from_buffer);
503
504/**
505 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
506 * @sgl:		 The SG list
507 * @nents:		 Number of SG entries
508 * @buf:		 Where to copy to
509 * @buflen:		 The number of bytes to copy
510 *
511 * Returns the number of copied bytes.
512 *
513 **/
514size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
515			 void *buf, size_t buflen)
516{
517	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
518}
519EXPORT_SYMBOL(sg_copy_to_buffer);
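A sketch of bouncing an sg list's payload through a linear buffer with the two copy helpers (all names are placeholders):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int bounce_transform(struct scatterlist *sgl, unsigned int nents,
			    size_t buflen)
{
	void *bounce = kmalloc(buflen, GFP_KERNEL);

	if (!bounce)
		return -ENOMEM;

	sg_copy_to_buffer(sgl, nents, bounce, buflen);
	/* ... operate on the linear copy ... */
	sg_copy_from_buffer(sgl, nents, bounce, buflen);

	kfree(bounce);
	return 0;
}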
lib/scatterlist.c (v3.15)
  1/*
  2 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
  3 *
  4 * Scatterlist handling helpers.
  5 *
  6 * This source code is licensed under the GNU General Public License,
  7 * Version 2. See the file COPYING for more details.
  8 */
  9#include <linux/export.h>
 10#include <linux/slab.h>
 11#include <linux/scatterlist.h>
 12#include <linux/highmem.h>
 13#include <linux/kmemleak.h>
 14
 15/**
 16 * sg_next - return the next scatterlist entry in a list
 17 * @sg:		The current sg entry
 18 *
 19 * Description:
 20 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 21 *   of a chained scatterlist, it could jump to the start of a new
 22 *   scatterlist array.
 23 *
 24 **/
 25struct scatterlist *sg_next(struct scatterlist *sg)
 26{
 27#ifdef CONFIG_DEBUG_SG
 28	BUG_ON(sg->sg_magic != SG_MAGIC);
 29#endif
 30	if (sg_is_last(sg))
 31		return NULL;
 32
 33	sg++;
 34	if (unlikely(sg_is_chain(sg)))
 35		sg = sg_chain_ptr(sg);
 36
 37	return sg;
 38}
 39EXPORT_SYMBOL(sg_next);
 40
 41/**
 42 * sg_nents - return total count of entries in scatterlist
 43 * @sg:		The scatterlist
 44 *
 45 * Description:
 46 * Returns the total number of entries in @sg, taking chaining into
 47 * account as well.
 48 *
 49 **/
 50int sg_nents(struct scatterlist *sg)
 51{
 52	int nents;
 53	for (nents = 0; sg; sg = sg_next(sg))
 54		nents++;
 55	return nents;
 56}
 57EXPORT_SYMBOL(sg_nents);
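A short sketch (the pairing with dma_map_sg() is illustrative, not from this file): because sg_nents() rewalks the possibly chained list, it recovers the entry count when only the head pointer was kept around.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int remap_list(struct device *dev, struct scatterlist *sgl,
		      enum dma_data_direction dir)
{
	int nents = sg_nents(sgl);	/* count recovered from the list */

	return dma_map_sg(dev, sgl, nents, dir);
}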
 58
 59
 60/**
 61 * sg_last - return the last scatterlist entry in a list
 62 * @sgl:	First entry in the scatterlist
 63 * @nents:	Number of entries in the scatterlist
 64 *
 65 * Description:
 66 *   Should only be used casually; it (currently) scans the entire list
 67 *   to get the last entry.
 68 *
 69 *   Note that the @sgl@ pointer passed in need not be the first one,
 70 *   the important bit is that @nents@ denotes the number of entries that
 71 *   exist from @sgl@.
 72 *
 73 **/
 74struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 75{
 76#ifndef ARCH_HAS_SG_CHAIN
 77	struct scatterlist *ret = &sgl[nents - 1];
 78#else
 79	struct scatterlist *sg, *ret = NULL;
 80	unsigned int i;
 81
 82	for_each_sg(sgl, sg, nents, i)
 83		ret = sg;
 84
 85#endif
 86#ifdef CONFIG_DEBUG_SG
 87	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 88	BUG_ON(!sg_is_last(ret));
 89#endif
 90	return ret;
 91}
 92EXPORT_SYMBOL(sg_last);
 93
 94/**
 95 * sg_init_table - Initialize SG table
 96 * @sgl:	   The SG table
 97 * @nents:	   Number of entries in table
 98 *
 99 * Notes:
100 *   If this is part of a chained sg table, sg_mark_end() should be
101 *   used only on the last table part.
102 *
103 **/
104void sg_init_table(struct scatterlist *sgl, unsigned int nents)
105{
106	memset(sgl, 0, sizeof(*sgl) * nents);
107#ifdef CONFIG_DEBUG_SG
108	{
109		unsigned int i;
110		for (i = 0; i < nents; i++)
111			sgl[i].sg_magic = SG_MAGIC;
112	}
113#endif
114	sg_mark_end(&sgl[nents - 1]);
115}
116EXPORT_SYMBOL(sg_init_table);
117
118/**
119 * sg_init_one - Initialize a single entry sg list
120 * @sg:		 SG entry
121 * @buf:	 Virtual address for IO
122 * @buflen:	 IO length
123 *
124 **/
125void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
126{
127	sg_init_table(sg, 1);
128	sg_set_buf(sg, buf, buflen);
129}
130EXPORT_SYMBOL(sg_init_one);
131
132/*
133 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
134 * helpers.
135 */
136static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
137{
138	if (nents == SG_MAX_SINGLE_ALLOC) {
139		/*
140		 * Kmemleak doesn't track page allocations as they are not
141		 * commonly used (in a raw form) for kernel data structures.
142		 * As we chain together a list of pages and then a normal
143 * kmalloc (tracked by kmemleak), in order for that last
144		 * allocation not to become decoupled (and thus a
145		 * false-positive) we need to inform kmemleak of all the
146		 * intermediate allocations.
147		 */
148		void *ptr = (void *) __get_free_page(gfp_mask);
149		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
150		return ptr;
151	} else
152		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
153}
154
155static void sg_kfree(struct scatterlist *sg, unsigned int nents)
156{
157	if (nents == SG_MAX_SINGLE_ALLOC) {
158		kmemleak_free(sg);
159		free_page((unsigned long) sg);
160	} else
161		kfree(sg);
162}
163
164/**
165 * __sg_free_table - Free a previously mapped sg table
166 * @table:	The sg table header to use
167 * @max_ents:	The maximum number of entries per single scatterlist
168 * @free_fn:	Free function
169 *
170 *  Description:
171 *    Free an sg table previously allocated and setup with
172 *    __sg_alloc_table().  The @max_ents value must be identical to
173 *    that previously used with __sg_alloc_table().
174 *
175 **/
176void __sg_free_table(struct sg_table *table, unsigned int max_ents,
177		     sg_free_fn *free_fn)
178{
179	struct scatterlist *sgl, *next;
180
181	if (unlikely(!table->sgl))
182		return;
183
184	sgl = table->sgl;
185	while (table->orig_nents) {
186		unsigned int alloc_size = table->orig_nents;
187		unsigned int sg_size;
188
189		/*
190		 * If we have more than max_ents segments left,
191		 * then assign 'next' to the sg table after the current one.
192		 * sg_size is then one less than alloc size, since the last
193		 * element is the chain pointer.
194		 */
195		if (alloc_size > max_ents) {
196			next = sg_chain_ptr(&sgl[max_ents - 1]);
197			alloc_size = max_ents;
198			sg_size = alloc_size - 1;
199		} else {
200			sg_size = alloc_size;
201			next = NULL;
202		}
203
204		table->orig_nents -= sg_size;
205		free_fn(sgl, alloc_size);
206		sgl = next;
207	}
208
209	table->sgl = NULL;
210}
211EXPORT_SYMBOL(__sg_free_table);
212
213/**
214 * sg_free_table - Free a previously allocated sg table
215 * @table:	The mapped sg table header
216 *
217 **/
218void sg_free_table(struct sg_table *table)
219{
220	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
221}
222EXPORT_SYMBOL(sg_free_table);
223
224/**
225 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
226 * @table:	The sg table header to use
227 * @nents:	Number of entries in sg list
228 * @max_ents:	The maximum number of entries the allocator returns per call
229 * @gfp_mask:	GFP allocation mask
230 * @alloc_fn:	Allocator to use
231 *
232 * Description:
233 *   This function sets up a @table @nents entries long. The allocator is
234 *   defined to return scatterlist chunks of maximum size @max_ents.
235 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
236 *   chained in units of @max_ents.
237 *
238 * Notes:
239 *   If this function returns non-0 (e.g. failure), the caller must call
240 *   __sg_free_table() to clean up any leftover allocations.
241 *
242 **/
243int __sg_alloc_table(struct sg_table *table, unsigned int nents,
244		     unsigned int max_ents, gfp_t gfp_mask,
245		     sg_alloc_fn *alloc_fn)
246{
247	struct scatterlist *sg, *prv;
248	unsigned int left;
249
250	memset(table, 0, sizeof(*table));
251
252	if (nents == 0)
253		return -EINVAL;
254#ifndef ARCH_HAS_SG_CHAIN
255	if (WARN_ON_ONCE(nents > max_ents))
256		return -EINVAL;
257#endif
258
259	left = nents;
260	prv = NULL;
261	do {
262		unsigned int sg_size, alloc_size = left;
263
264		if (alloc_size > max_ents) {
265			alloc_size = max_ents;
266			sg_size = alloc_size - 1;
267		} else
268			sg_size = alloc_size;
269
270		left -= sg_size;
271
272		sg = alloc_fn(alloc_size, gfp_mask);
273		if (unlikely(!sg)) {
274			/*
275			 * Adjust entry count to reflect that the last
276			 * entry of the previous table won't be used for
277			 * linkage.  Without this, sg_kfree() may get
278			 * confused.
279			 */
280			if (prv)
281				table->nents = ++table->orig_nents;
282
283 			return -ENOMEM;
284		}
285
286		sg_init_table(sg, alloc_size);
287		table->nents = table->orig_nents += sg_size;
288
289		/*
290		 * If this is the first mapping, assign the sg table header.
291		 * If this is not the first mapping, chain previous part.
292		 */
293		if (prv)
294			sg_chain(prv, max_ents, sg);
295		else
296			table->sgl = sg;
297
298		/*
299		 * If no more entries after this one, mark the end
300		 */
301		if (!left)
302			sg_mark_end(&sg[sg_size - 1]);
303
304		prv = sg;
305	} while (left);
306
307	return 0;
308}
309EXPORT_SYMBOL(__sg_alloc_table);
310
311/**
312 * sg_alloc_table - Allocate and initialize an sg table
313 * @table:	The sg table header to use
314 * @nents:	Number of entries in sg list
315 * @gfp_mask:	GFP allocation mask
316 *
317 *  Description:
318 *    Allocate and initialize an sg table. If @nents@ is larger than
319 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
320 *
321 **/
322int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
323{
324	int ret;
325
326	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
327			       gfp_mask, sg_kmalloc);
328	if (unlikely(ret))
329		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
330
331	return ret;
332}
333EXPORT_SYMBOL(sg_alloc_table);
334
335/**
336 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
337 *			       an array of pages
338 * @sgt:	The sg table header to use
339 * @pages:	Pointer to an array of page pointers
340 * @n_pages:	Number of pages in the pages array
341 * @offset:     Offset from start of the first page to the start of a buffer
342 * @size:       Number of valid bytes in the buffer (after offset)
343 * @gfp_mask:	GFP allocation mask
344 *
345 *  Description:
346 *    Allocate and initialize an sg table from a list of pages. Contiguous
347 *    ranges of the pages are squashed into a single scatterlist entry. The
348 *    caller may provide an offset into the first page and the size of the
349 *    valid data in the buffer described by the page array. The returned sg
350 *    table is released with sg_free_table().
351 *
352 * Returns:
353 *   0 on success, negative error on failure
354 */
355int sg_alloc_table_from_pages(struct sg_table *sgt,
356	struct page **pages, unsigned int n_pages,
357	unsigned long offset, unsigned long size,
358	gfp_t gfp_mask)
359{
360	unsigned int chunks;
361	unsigned int i;
362	unsigned int cur_page;
363	int ret;
364	struct scatterlist *s;
365
366	/* compute number of contiguous chunks */
367	chunks = 1;
368	for (i = 1; i < n_pages; ++i)
369		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
370			++chunks;
371
372	ret = sg_alloc_table(sgt, chunks, gfp_mask);
373	if (unlikely(ret))
374		return ret;
375
376	/* merging chunks and putting them into the scatterlist */
377	cur_page = 0;
378	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
379		unsigned long chunk_size;
380		unsigned int j;
381
382		/* look for the end of the current chunk */
383		for (j = cur_page + 1; j < n_pages; ++j)
384			if (page_to_pfn(pages[j]) !=
385			    page_to_pfn(pages[j - 1]) + 1)
386				break;
387
388		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
389		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
390		size -= chunk_size;
391		offset = 0;
392		cur_page = j;
393	}
394
395	return 0;
396}
397EXPORT_SYMBOL(sg_alloc_table_from_pages);
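A sketch of the pin-then-describe flow this helper serves (hypothetical function; pages, n_pages and size are assumed to come from something like get_user_pages()):

#include <linux/scatterlist.h>

static int table_from_pinned_pages(struct sg_table *sgt, struct page **pages,
				   unsigned int n_pages, unsigned long size)
{
	int ret;

	/* physically contiguous neighbours collapse into single entries */
	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* ... dma_map_sg() etc.; sg_free_table(sgt) when finished ... */
	return 0;
}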
398
399void __sg_page_iter_start(struct sg_page_iter *piter,
400			  struct scatterlist *sglist, unsigned int nents,
401			  unsigned long pgoffset)
402{
403	piter->__pg_advance = 0;
404	piter->__nents = nents;
405
406	piter->sg = sglist;
407	piter->sg_pgoffset = pgoffset;
408}
409EXPORT_SYMBOL(__sg_page_iter_start);
410
411static int sg_page_count(struct scatterlist *sg)
412{
413	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
414}
415
416bool __sg_page_iter_next(struct sg_page_iter *piter)
417{
418	if (!piter->__nents || !piter->sg)
419		return false;
420
421	piter->sg_pgoffset += piter->__pg_advance;
422	piter->__pg_advance = 1;
423
424	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
425		piter->sg_pgoffset -= sg_page_count(piter->sg);
426		piter->sg = sg_next(piter->sg);
427		if (!--piter->__nents || !piter->sg)
428			return false;
429	}
430
431	return true;
432}
433EXPORT_SYMBOL(__sg_page_iter_next);
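These two functions back the for_each_sg_page() macro from linux/scatterlist.h; a sketch (hypothetical helper) of visiting every page a table covers, independent of how many pages each entry spans:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void flush_table_pages(struct sg_table *sgt)
{
	struct sg_page_iter piter;

	for_each_sg_page(sgt->sgl, &piter, sgt->orig_nents, 0)
		flush_dcache_page(sg_page_iter_page(&piter));
}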
434
435/**
436 * sg_miter_start - start mapping iteration over a sg list
437 * @miter: sg mapping iter to be started
438 * @sgl: sg list to iterate over
439 * @nents: number of sg entries
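 * @flags: sg iteration flags (SG_MITER_ATOMIC, SG_MITER_TO_SG, SG_MITER_FROM_SG)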
440 *
441 * Description:
442 *   Starts mapping iterator @miter.
443 *
444 * Context:
445 *   Don't care.
446 */
447void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
448		    unsigned int nents, unsigned int flags)
449{
450	memset(miter, 0, sizeof(struct sg_mapping_iter));
451
452	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
453	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
454	miter->__flags = flags;
455}
456EXPORT_SYMBOL(sg_miter_start);
457
458static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
459{
460	if (!miter->__remaining) {
461		struct scatterlist *sg;
462		unsigned long pgoffset;
463
464		if (!__sg_page_iter_next(&miter->piter))
465			return false;
466
467		sg = miter->piter.sg;
468		pgoffset = miter->piter.sg_pgoffset;
469
470		miter->__offset = pgoffset ? 0 : sg->offset;
471		miter->__remaining = sg->offset + sg->length -
472				(pgoffset << PAGE_SHIFT) - miter->__offset;
473		miter->__remaining = min_t(unsigned long, miter->__remaining,
474					   PAGE_SIZE - miter->__offset);
475	}
476
477	return true;
478}
479
480/**
481 * sg_miter_skip - reposition mapping iterator
482 * @miter: sg mapping iter to be skipped
483 * @offset: number of bytes to advance past the current location
484 *
485 * Description:
486 *   Sets the offset of @miter to its current location plus @offset bytes.
487 *   If the mapping iterator @miter has already been advanced by
488 *   sg_miter_next(), this stops @miter first.
489 *
490 * Context:
491 *   Don't care if @miter is stopped or has not been advanced yet.
492 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
493 *
494 * Returns:
495 *   true if @miter contains the valid mapping.  false if end of sg
496 *   list is reached.
497 */
498bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
499{
500	sg_miter_stop(miter);
501
502	while (offset) {
503		off_t consumed;
504
505		if (!sg_miter_get_next_page(miter))
506			return false;
507
508		consumed = min_t(off_t, offset, miter->__remaining);
509		miter->__offset += consumed;
510		miter->__remaining -= consumed;
511		offset -= consumed;
512	}
513
514	return true;
515}
516EXPORT_SYMBOL(sg_miter_skip);
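A sketch of stepping over a leading header before mapping payload (hypothetical helper; hdr_len is a placeholder):

#include <linux/scatterlist.h>

static size_t payload_bytes(struct scatterlist *sgl, unsigned int nents,
			    size_t hdr_len)
{
	struct sg_mapping_iter miter;
	size_t total = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	if (!sg_miter_skip(&miter, hdr_len))
		return 0;	/* list is shorter than the header */

	while (sg_miter_next(&miter))
		total += miter.length;	/* payload fragments only */

	sg_miter_stop(&miter);
	return total;
}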
517
518/**
519 * sg_miter_next - proceed mapping iterator to the next mapping
520 * @miter: sg mapping iter to proceed
521 *
522 * Description:
523 *   Proceeds @miter to the next mapping.  @miter should have been started
524 *   using sg_miter_start().  On successful return, @miter->page,
525 *   @miter->addr and @miter->length point to the current mapping.
526 *
527 * Context:
528 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
529 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
530 *
531 * Returns:
532 *   true if @miter contains the next mapping.  false if end of sg
533 *   list is reached.
534 */
535bool sg_miter_next(struct sg_mapping_iter *miter)
536{
537	sg_miter_stop(miter);
538
539	/*
540	 * Get to the next page if necessary.
541	 * __remaining and __offset are adjusted by sg_miter_stop
542	 */
543	if (!sg_miter_get_next_page(miter))
544		return false;
545
546	miter->page = sg_page_iter_page(&miter->piter);
547	miter->consumed = miter->length = miter->__remaining;
548
549	if (miter->__flags & SG_MITER_ATOMIC)
550		miter->addr = kmap_atomic(miter->page) + miter->__offset;
551	else
552		miter->addr = kmap(miter->page) + miter->__offset;
553
554	return true;
555}
556EXPORT_SYMBOL(sg_miter_next);
557
558/**
559 * sg_miter_stop - stop mapping iteration
560 * @miter: sg mapping iter to be stopped
561 *
562 * Description:
563 *   Stops mapping iterator @miter.  @miter should have been started
564 *   using sg_miter_start().  A stopped iteration can be
565 *   resumed by calling sg_miter_next() on it.  This is useful when
566 *   resources (kmap) need to be released during iteration.
567 *
568 * Context:
569 *   Preemption disabled if SG_MITER_ATOMIC is set.  Don't care
570 *   otherwise.
571 */
572void sg_miter_stop(struct sg_mapping_iter *miter)
573{
574	WARN_ON(miter->consumed > miter->length);
575
576	/* drop resources from the last iteration */
577	if (miter->addr) {
578		miter->__offset += miter->consumed;
579		miter->__remaining -= miter->consumed;
580
581		if ((miter->__flags & SG_MITER_TO_SG) &&
582		    !PageSlab(miter->page))
583			flush_kernel_dcache_page(miter->page);
584
585		if (miter->__flags & SG_MITER_ATOMIC) {
586			WARN_ON_ONCE(preemptible());
587			kunmap_atomic(miter->addr);
588		} else
589			kunmap(miter->page);
590
591		miter->page = NULL;
592		miter->addr = NULL;
593		miter->length = 0;
594		miter->consumed = 0;
595	}
596}
597EXPORT_SYMBOL(sg_miter_stop);
598
599/**
600 * sg_copy_buffer - Copy data between a linear buffer and an SG list
601 * @sgl:		 The SG list
602 * @nents:		 Number of SG entries
603 * @buf:		 Where to copy from
604 * @buflen:		 The number of bytes to copy
605 * @skip:		 Number of bytes to skip before copying
606 * @to_buffer:		 transfer direction (true == from an sg list to a
607 *			 buffer, false == from a buffer to an sg list)
608 *
609 * Returns the number of copied bytes.
610 *
611 **/
612static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
613			     void *buf, size_t buflen, off_t skip,
614			     bool to_buffer)
615{
616	unsigned int offset = 0;
617	struct sg_mapping_iter miter;
618	unsigned long flags;
619	unsigned int sg_flags = SG_MITER_ATOMIC;
620
621	if (to_buffer)
622		sg_flags |= SG_MITER_FROM_SG;
623	else
624		sg_flags |= SG_MITER_TO_SG;
625
626	sg_miter_start(&miter, sgl, nents, sg_flags);
627
628	if (!sg_miter_skip(&miter, skip))
629		return 0;
630
631	local_irq_save(flags);
632
633	while (sg_miter_next(&miter) && offset < buflen) {
634		unsigned int len;
635
636		len = min(miter.length, buflen - offset);
637
638		if (to_buffer)
639			memcpy(buf + offset, miter.addr, len);
640		else
641			memcpy(miter.addr, buf + offset, len);
642
643		offset += len;
644	}
645
646	sg_miter_stop(&miter);
647
648	local_irq_restore(flags);
649	return offset;
650}
651
652/**
653 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
654 * @sgl:		 The SG list
655 * @nents:		 Number of SG entries
656 * @buf:		 Where to copy from
657 * @buflen:		 The number of bytes to copy
658 *
659 * Returns the number of copied bytes.
660 *
661 **/
662size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
663			   void *buf, size_t buflen)
664{
665	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
666}
667EXPORT_SYMBOL(sg_copy_from_buffer);
668
669/**
670 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
671 * @sgl:		 The SG list
672 * @nents:		 Number of SG entries
673 * @buf:		 Where to copy to
674 * @buflen:		 The number of bytes to copy
675 *
676 * Returns the number of copied bytes.
677 *
678 **/
679size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
680			 void *buf, size_t buflen)
681{
682	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
683}
684EXPORT_SYMBOL(sg_copy_to_buffer);
685
686/**
687 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
688 * @sgl:		 The SG list
689 * @nents:		 Number of SG entries
690 * @buf:		 Where to copy from
691 * @buflen:		 The number of bytes to copy
692 * @skip:		 Number of bytes to skip before copying
693 *
694 * Returns the number of copied bytes.
695 *
696 **/
697size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
698			    void *buf, size_t buflen, off_t skip)
699{
700	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
701}
702EXPORT_SYMBOL(sg_pcopy_from_buffer);
703
704/**
705 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
706 * @sgl:		 The SG list
707 * @nents:		 Number of SG entries
708 * @buf:		 Where to copy to
709 * @buflen:		 The number of bytes to copy
710 * @skip:		 Number of bytes to skip before copying
711 *
712 * Returns the number of copied bytes.
713 *
714 **/
715size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
716			  void *buf, size_t buflen, off_t skip)
717{
718	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
719}
720EXPORT_SYMBOL(sg_pcopy_to_buffer);
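A sketch of the positional variants (names are placeholders): copy len bytes starting skip bytes into the list, without first copying the bytes being skipped.

#include <linux/scatterlist.h>

static size_t peek_at_offset(struct scatterlist *sgl, unsigned int nents,
			     void *out, size_t len, off_t skip)
{
	/* like sg_copy_to_buffer(), but starts skip bytes into the list */
	return sg_pcopy_to_buffer(sgl, nents, out, len, skip);
}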