v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}


void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}

int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
		0, range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
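
For orientation, here is a minimal caller-side sketch of how the v5.14.15 interface above fits together. It is not part of the kernel file: the function my_use_buffer() and the my_sgt/my_npages parameters are hypothetical placeholders, and the 0/U64_MAX range simply means "anywhere in the address space"; only the msm_gem_* calls and their ordering come from the code above.

/*
 * Hypothetical caller -- not from the kernel tree.  Assumes the same
 * headers as the file above plus <linux/iommu.h> (IOMMU_READ/IOMMU_WRITE)
 * and <linux/limits.h> (U64_MAX).
 */
static int my_use_buffer(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma,
		struct sg_table *my_sgt, int my_npages)
{
	int ret;

	/* Reserve an iova range; this also takes a reference on aspace. */
	ret = msm_gem_init_vma(aspace, vma, my_npages, 0, U64_MAX);
	if (ret)
		return ret;

	/* Map the backing pages at vma->iova and bump vma->inuse. */
	ret = msm_gem_map_vma(aspace, vma, IOMMU_READ | IOMMU_WRITE,
			my_sgt, my_npages);
	if (ret)
		goto out_close;

	/* ... the GPU uses vma->iova here ... */

	msm_gem_unmap_vma(aspace, vma);	/* drop the inuse count */
	msm_gem_purge_vma(aspace, vma);	/* actually unmap the iova */
out_close:
	msm_gem_close_vma(aspace, vma);	/* free the iova, drop the aspace ref */
	return ret;
}

Note that the reference taken by msm_gem_init_vma() on the address space is dropped again by msm_gem_close_vma(), so a balanced init/close pairing keeps the refcounting correct.
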
v4.17
 
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	kfree(aspace);
}


void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt)
{
	if (!aspace || !vma->iova)
		return;

	if (aspace->mmu) {
		unsigned size = vma->node.size << PAGE_SHIFT;
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
	}

	spin_lock(&aspace->lock);
	drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	int ret;

	spin_lock(&aspace->lock);
	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
		spin_unlock(&aspace->lock);
		return 0;
	}

	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;

	if (aspace->mmu) {
		unsigned size = npages << PAGE_SHIFT;
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, IOMMU_READ | IOMMU_WRITE);
	}

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);

	return ret;
}

struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name)
{
	struct msm_gem_address_space *aspace;
	u64 size = domain->geometry.aperture_end -
		domain->geometry.aperture_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_iommu_new(dev, domain);

	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
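
For comparison, a similar hypothetical sketch against the older v4.17 interface above, where iova allocation and IOMMU mapping happen in a single msm_gem_map_vma() call and msm_gem_unmap_vma() both unmaps and frees the iova. The names my_dev, my_domain, my_sgt and my_npages are assumptions for illustration, and unwinding of a partially failed map is elided for brevity.

/*
 * Hypothetical caller -- not from the kernel tree.  Assumes the same
 * headers as the file above plus <linux/device.h>, <linux/iommu.h> and
 * <linux/err.h>.
 */
static int my_use_buffer_old(struct device *my_dev,
		struct iommu_domain *my_domain,
		struct msm_gem_vma *vma,
		struct sg_table *my_sgt, int my_npages)
{
	struct msm_gem_address_space *aspace;
	int ret;

	/* Wrap the IOMMU domain in an msm_mmu and set up the iova pool. */
	aspace = msm_gem_address_space_create(my_dev, my_domain, "my-aspace");
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	/* Allocate an iova and map it IOMMU_READ | IOMMU_WRITE. */
	ret = msm_gem_map_vma(aspace, vma, my_sgt, my_npages);
	if (ret) {
		/* Unwinding of a partially failed map is elided here. */
		msm_gem_address_space_put(aspace);
		return ret;
	}

	/* ... the GPU uses vma->iova here ... */

	msm_gem_unmap_vma(aspace, vma, my_sgt);	/* unmap, free iova, drop ref */
	msm_gem_address_space_put(aspace);	/* drop the creation reference */
	return 0;
}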