/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-buf-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource will be placed at
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. The backing bus information is cleared
 * and the caching defaults to ttm_cached.
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_resource *res)
{
	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
}
EXPORT_SYMBOL(ttm_resource_init);
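
/*
 * Example: a minimal, hedged sketch of how a resource manager's ->alloc()
 * callback might use ttm_resource_init(). The function name is hypothetical
 * and not part of this file:
 *
 *	static int example_man_alloc(struct ttm_resource_manager *man,
 *				     struct ttm_buffer_object *bo,
 *				     const struct ttm_place *place,
 *				     struct ttm_resource **res)
 *	{
 *		*res = kzalloc(sizeof(**res), GFP_KERNEL);
 *		if (!*res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, *res);
 *		return 0;
 *	}
 */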

/**
 * ttm_resource_alloc - allocate a resource for a buffer object
 * @bo: buffer object the resource will belong to
 * @place: placement the resource should be allocated from
 * @res_ptr: pointer returning the allocated resource on success
 *
 * Forward the allocation to the resource manager selected by the memory
 * type in @place.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);

	return man->func->alloc(man, bo, place, res_ptr);
}

/**
 * ttm_resource_free - free a resource and clear the pointer
 * @bo: buffer object the resource belongs to
 * @res: pointer to the resource to free, may point to NULL
 *
 * Hand the resource back to the manager it was allocated from and reset
 * *@res to NULL. Does nothing if *@res is already NULL.
 */
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
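
/*
 * Example: a hedged sketch of the alloc/free pairing from the caller's
 * point of view. The surrounding code and error handling are illustrative
 * only:
 *
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_resource_alloc(bo, place, &res);
 *	if (ret)
 *		return ret;
 *
 *	// ... use the resource ...
 *
 *	ttm_resource_free(bo, &res);	// res is NULL afterwards
 */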

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @p_size: size of the managed area in pages
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       unsigned long p_size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
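
/*
 * Example: a hedged sketch of bringing up a driver-private manager. It
 * assumes the ttm_set_driver_manager() and ttm_resource_manager_set_used()
 * helpers from the TTM headers; the size, memory type and ops table are
 * made up for illustration:
 *
 *	ttm_resource_manager_init(man, vram_size >> PAGE_SHIFT);
 *	man->func = &example_manager_func;	// hypothetical ops table
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
 *	ttm_resource_manager_set_used(man, true);
 */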

/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device to use
 * @man: manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&bdev->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			spin_lock(&bdev->lru_lock);
		}
	}
	spin_unlock(&bdev->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
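
/*
 * Example: a hedged sketch of the manager teardown sequence this function
 * is part of, assuming the ttm_resource_manager_set_used(),
 * ttm_resource_manager_cleanup() and ttm_set_driver_manager() helpers from
 * the TTM headers:
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ret = ttm_resource_manager_evict_all(bdev, man);
 *	if (ret)
 *		return ret;
 *	ttm_resource_manager_cleanup(man);
 *	ttm_set_driver_manager(bdev, mem_type, NULL);
 */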

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
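
/*
 * Example: a hedged sketch of dumping a manager through a drm_printer,
 * assuming the drm_info_printer() helper from drm_print.h; "dev" stands in
 * for the driver's struct device and is hypothetical here:
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	ttm_resource_manager_debug(man, &p);
 */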

/*
 * The iomap kmap iterator caches the scatterlist segment it last looked up,
 * so consecutive page indices don't have to rescan the sg_table. The cache
 * window only moves forward; if a page index before the cached segment is
 * requested, the cache is reset and the walk restarts from the first
 * segment.
 */
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct dma_buf_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	/* Walk forward through the scatterlist until @i falls in the cache. */
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	/* @i is behind the cached segment; reset and rescan from the start. */
	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	dma_buf_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct dma_buf_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
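
/*
 * Example: a hedged sketch of consuming a kmap iterator page by page, the
 * way a memcpy move helper might. The destination side and error handling
 * are omitted; "num_pages" and "buf" are illustrative only:
 *
 *	struct ttm_kmap_iter *iter = ttm_kmap_iter_iomap_init(&iter_io, iomap,
 *							      st, start);
 *	struct dma_buf_map map;
 *	pgoff_t i;
 *
 *	for (i = 0; i < num_pages; ++i) {
 *		iter->ops->map_local(iter, &map, i);
 *		if (map.is_iomem)
 *			memcpy_fromio(buf, map.vaddr_iomem, PAGE_SIZE);
 *		else
 *			memcpy(buf, map.vaddr, PAGE_SIZE);
 *		if (iter->ops->unmap_local)
 *			iter->ops->unmap_local(iter, &map);
 *		buf += PAGE_SIZE;
 *	}
 */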

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too distant future. Best would be if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Instead they use
 * ioremap or friends, which at least on 32-bit adds global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct dma_buf_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	dma_buf_map_incr(dmap, i * PAGE_SIZE);
}

/*
 * No unmap_local callback: the linear mapping is persistent and only torn
 * down in ttm_kmap_iter_linear_io_fini().
 */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
						    ioremap_wc(mem->bus.offset,
							       bus_size));
		else if (mem->bus.caching == ttm_cached)
			dma_buf_map_set_vaddr(&iter_io->dmap,
					      memremap(mem->bus.offset, bus_size,
						       MEMREMAP_WB |
						       MEMREMAP_WT |
						       MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (dma_buf_map_is_null(&iter_io->dmap))
			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
						    ioremap(mem->bus.offset,
							    bus_size));

		if (dma_buf_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
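
/*
 * Example: a hedged sketch of the init/fini pairing for the linear io
 * iterator, as a move helper might use it internally. "iter_io" is a
 * caller-provided struct ttm_kmap_iter_linear_io; error handling beyond the
 * init check is omitted:
 *
 *	struct ttm_kmap_iter *iter;
 *
 *	iter = ttm_kmap_iter_linear_io_init(&iter_io, bdev, mem);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *
 *	// ... map pages via iter->ops->map_local() and copy ...
 *
 *	ttm_kmap_iter_linear_io_fini(&iter_io, bdev, mem);
 */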