/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

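/*
 * Define STATS to collect per-device allocation and bounce counters,
 * exported through the read-only "dmabounce_stats" sysfs attribute.
 */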
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

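	/*
	 * Protects safe_buffers: the lookup path takes it shared,
	 * allocation/free of safe buffers take it exclusive.
	 */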
	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

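/*
 * Look up the bounce record (if any) for an already-mapped DMA address.
 * Returns NULL for devices not registered with dmabounce and for
 * addresses that were never bounced.
 */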
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

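/*
 * Decide whether a mapping must be bounced: returns a negative errno if
 * the request can never fit the device's DMA window, zero if the address
 * range is directly usable, and nonzero if a bounce buffer is required.
 */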
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

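		/*
		 * For a contiguous low mask (2^n - 1) this yields the window
		 * size 2^n; a full 32-bit mask wraps to 0 and skips the check.
		 */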
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

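/*
 * Copy-in side of a bounce: allocate a safe buffer, remember the original
 * pointer, and (for DMA_TO_DEVICE / DMA_BIDIRECTIONAL) copy the caller's
 * data into it before handing the safe DMA address back.
 */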
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
		       __func__, ptr);
		return DMA_ERROR_CODE;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range.  If it is,
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * Substitute the safe buffer for the unsafe one
 * (basically, move the buffer from an unsafe area to a safe one).
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_ERROR_CODE;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

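	/*
	 * Bouncing relies on memcpy() via page_address(), which is only
	 * valid for lowmem pages; highmem buffers cannot be bounced here.
	 */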
	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}

/*
 * See if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer (basically, return things back to the way they
 * should be).
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}

static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

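/*
 * Only the single-page and single-buffer sync hooks are overridden here;
 * the generic ARM scatter-gather helpers walk each sg entry and end up in
 * the map_page/unmap_page hooks above, so sg mappings are bounced as well.
 */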
static struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= dmabounce_set_mask,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
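
/*
 * Illustrative sketch (not part of the original file): a bus or platform
 * driver with a limited DMA window would typically register itself at
 * probe time, passing a callback that says which bus addresses must be
 * bounced. The window base/size, names and pool sizes below are
 * hypothetical, and the block is compiled out.
 */
#if 0
#define EXAMPLE_WINDOW_BASE	0x00000000UL	/* hypothetical DMA window base */
#define EXAMPLE_WINDOW_SIZE	0x04000000UL	/* hypothetical 64MiB window */

static int example_needs_bounce(struct device *dev, dma_addr_t addr,
				size_t size)
{
	/* Bounce anything that ends beyond the device's DMA window. */
	return (addr + size) > (EXAMPLE_WINDOW_BASE + EXAMPLE_WINDOW_SIZE);
}

static int example_probe(struct device *dev)
{
	/* 512-byte and 4KiB pools cover the common streaming mappings. */
	return dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);
}

static void example_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}
#endif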

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");