drivers/gpu/drm/radeon/radeon_mn.c (v5.4)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>

#include <drm/drm.h>

#include "radeon.h"

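/*
 * One struct radeon_mn is shared by all userptr BOs of a process (see
 * mmu_notifier_get() in radeon_mn_register() below). Registered address
 * ranges live in the objects interval tree; BOs whose userptr ranges
 * overlap are merged onto the bos list of a single radeon_mn_node.
 */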
struct radeon_mn {
	struct mmu_notifier	mn;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root_cached	objects;
};

struct radeon_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @range: mmu notifier range describing the updated address range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct ttm_operation_ctx ctx = { false, false };
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	/* notification is exclusive, but interval is inclusive */
	end = range->end - 1;

	/* TODO we should be able to split locking for interval tree and
	 * the tear down.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&rmn->lock);
	else if (!mutex_trylock(&rmn->lock))
		return -EAGAIN;

	it = interval_tree_iter_first(&rmn->objects, range->start, end);
	while (it) {
		struct radeon_mn_node *node;
		struct radeon_bo *bo;
		long r;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			goto out_unlock;
		}

		node = container_of(it, struct radeon_mn_node, it);
		it = interval_tree_iter_next(it, range->start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
				continue;

			r = radeon_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			radeon_bo_unreserve(bo);
		}
	}

out_unlock:
	mutex_unlock(&rmn->lock);

	return ret;
}

static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
		.flags = 0,
		.event = MMU_NOTIFY_UNMAP,
	};

	radeon_mn_invalidate_range_start(mn, &range);
}

static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm)
{
	struct radeon_mn *rmn;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT_CACHED;
	return &rmn->mn;
}

static void radeon_mn_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct radeon_mn, mn));
}

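/*
 * alloc_notifier/free_notifier let the core share one notifier per mm:
 * mmu_notifier_get() returns the process' existing radeon_mn if there is
 * one, otherwise it allocates a new one through radeon_mn_alloc_notifier(),
 * and the final mmu_notifier_put() releases it via radeon_mn_free_notifier().
 */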
static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
	.alloc_notifier = radeon_mn_alloc_notifier,
	.free_notifier = radeon_mn_free_notifier,
};

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct mmu_notifier *mn;
	struct radeon_mn *rmn;
	struct radeon_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	rmn = container_of(mn, struct radeon_mn, mn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct radeon_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}
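
/*
 * Illustration only (not part of radeon_mn.c): the expected pairing of
 * radeon_mn_register()/radeon_mn_unregister() for a userptr BO. The BO is
 * assumed to have been created by the caller (for example from the GEM
 * userptr ioctl path); everything except the notifier calls is elided.
 */
static int radeon_userptr_track_example(struct radeon_bo *bo,
					unsigned long user_addr)
{
	int r;

	/* Start watching the user address range backing this BO. */
	r = radeon_mn_register(bo, user_addr);
	if (r)
		return r;

	/*
	 * ... use the BO; whenever the range is invalidated the callback
	 * above waits for it to be idle and moves it back to the CPU
	 * domain ...
	 */

	/* Stop watching before the BO is finally released. */
	radeon_mn_unregister(bo);
	return 0;
}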

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_mn *rmn = bo->mn;
	struct list_head *head;

	if (!rmn)
		return;

	mutex_lock(&rmn->lock);
	/* save the next list entry for later */
	head = bo->mn_list.next;

	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct radeon_mn_node *node;
		node = container_of(head, struct radeon_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);

	mmu_notifier_put(&rmn->mn);
	bo->mn = NULL;
}
drivers/gpu/drm/radeon/radeon_mn.c (v6.8)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>

#include <drm/drm.h>

#include "radeon.h"

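/*
 * Compared with the v5.4 code above, the per-process radeon_mn structure
 * and its interval tree are gone: each userptr BO now embeds its own
 * struct mmu_interval_notifier (bo->notifier), and the mmu_interval
 * notifier core tracks the registered address range for us.
 */
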
/**
 * radeon_mn_invalidate - callback to notify about mm change
 *
 * @mn: our notifier
 * @range: the VMA under invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
				 const struct mmu_notifier_range *range,
				 unsigned long cur_seq)
{
	struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
	struct ttm_operation_ctx ctx = { false, false };
	long r;

	if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))
		return true;

	if (!mmu_notifier_range_blockable(range))
		return false;

	r = radeon_bo_reserve(bo, true);
	if (r) {
		DRM_ERROR("(%ld) failed to reserve user bo\n", r);
		return true;
	}

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		DRM_ERROR("(%ld) failed to validate user bo\n", r);

	radeon_bo_unreserve(bo);
	return true;
}

static const struct mmu_interval_notifier_ops radeon_mn_ops = {
	.invalidate = radeon_mn_invalidate,
};

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	int ret;

	ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
					   radeon_bo_size(bo), &radeon_mn_ops);
	if (ret)
		return ret;

	/*
	 * FIXME: radeon appears to allow get_user_pages to run during
	 * invalidate_range_start/end, which is not a safe way to read the
	 * PTEs. It should use the mmu_interval_read_begin() scheme around the
	 * get_user_pages to ensure that the PTEs are read properly
	 */
	mmu_interval_read_begin(&bo->notifier);
	return 0;
}
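
/*
 * Illustration only (not part of radeon_mn.c): the mmu_interval_read_begin()
 * retry scheme that the FIXME above refers to. The driver-side lock and the
 * page array handling are assumed for the example; the point is that the
 * sequence count taken before pinning is re-checked, under a lock that the
 * invalidate callback also takes, before the pages are actually used.
 */
static int radeon_userptr_pin_example(struct radeon_bo *bo, unsigned long start,
				      unsigned long npages, struct page **pages,
				      struct mutex *driver_lock)
{
	unsigned long seq;
	long pinned;

retry:
	/* Snapshot the invalidation sequence before touching the PTEs. */
	seq = mmu_interval_read_begin(&bo->notifier);

	pinned = pin_user_pages_fast(start, npages, FOLL_WRITE, pages);
	if (pinned != npages) {
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		return -EFAULT;
	}

	/*
	 * The invalidate callback must take the same lock and call
	 * mmu_interval_set_seq() so that this check is race free.
	 */
	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(&bo->notifier, seq)) {
		mutex_unlock(driver_lock);
		unpin_user_pages(pages, npages);
		goto retry;
	}

	/* ... bind the pages to the GPU while still holding the lock ... */
	mutex_unlock(driver_lock);
	return 0;
}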

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	if (!bo->notifier.mm)
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}