v5.4 (drivers/gpu/drm/msm/msm_gem_vma.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
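
The address space is reference-counted: msm_gem_init_vma() below takes a reference for every vma carved out of the space, and msm_gem_close_vma() drops it again. A minimal sketch of the put side, assuming a hypothetical per-context holder (example_ctx and its field are illustrative, not from this driver):

struct example_ctx {
	struct msm_gem_address_space *aspace;
};

static void example_ctx_destroy(struct example_ctx *ctx)
{
	/* NULL-safe; frees the aspace once the last reference is gone. */
	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx);
}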

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}

int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret)
		vma->mapped = false;

	return ret;
}
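
Note that msm_gem_map_vma() increments vma->inuse on every call, even when the vma is already mapped, so each call must eventually be balanced by msm_gem_unmap_vma(). A hedged caller sketch (the prot flags come from <linux/iommu.h>; msm_obj and npages stand in for the GEM object's backing pages and are assumptions here):

int prot = IOMMU_READ | IOMMU_WRITE;	/* a read-only BO would drop WRITE */
int ret = msm_gem_map_vma(aspace, vma, prot, msm_obj->sgt, npages);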

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}
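
Taken together, a vma in this version moves through roughly the following sequence. A sketch with error handling elided (aspace, vma, sgt, npages and prot are assumed to already exist):

/* allocate an iova from the aspace; takes a kref on it */
ret = msm_gem_init_vma(aspace, vma, npages);

/* map the pages and mark the vma in use (inuse++) */
ret = msm_gem_map_vma(aspace, vma, prot, sgt, npages);

/* ... hardware uses vma->iova ... */

msm_gem_unmap_vma(aspace, vma);	/* drop the usage count (inuse--) */
msm_gem_purge_vma(aspace, vma);	/* actually unmap, once idle */
msm_gem_close_vma(aspace, vma);	/* release the iova and the aspace ref */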

struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name)
{
	struct msm_gem_address_space *aspace;
	u64 size = domain->geometry.aperture_end -
		domain->geometry.aperture_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_iommu_new(dev, domain);

	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
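
A hedged creation sketch: callers of this era allocate an iommu_domain, set its aperture, and hand it in. The aperture values below are illustrative, not any real device's:

static struct msm_gem_address_space *
example_create_gpu_aspace(struct device *dev)
{
	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);

	if (!domain)
		return ERR_PTR(-ENODEV);

	domain->geometry.aperture_start = SZ_16M;
	domain->geometry.aperture_end   = 0xffffffff;

	return msm_gem_address_space_create(dev, domain, "gpu");
}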

struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
		const char *name, uint64_t va_start, uint64_t va_end)
{
	struct msm_gem_address_space *aspace;
	u64 size = va_end - va_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_gpummu_new(dev, gpu);

	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
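
The a2xx variant exists because those GPUs predate usable IOMMU support and bring their own "gpummu"; apart from the MMU constructor and the explicit VA bounds, the body mirrors msm_gem_address_space_create(). A usage sketch, with VA bounds modeled on a2xx-era call sites but to be treated as assumptions:

/* Hypothetical call site, e.g. from an a2xx probe path: */
aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
		SZ_16M, 0xfff * SZ_64K);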
v6.13.7 (drivers/gpu/drm/msm/msm_gem_vma.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}
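
New relative to v5.4: a get() helper that tolerates NULL and ERR_PTR values, so a context can take its own reference without pre-checking. A minimal sketch (the ctx and gpu fields are hypothetical):

ctx->aspace = msm_gem_address_space_get(gpu->aspace);
/* ... use ctx->aspace ... */
msm_gem_address_space_put(ctx->aspace);	/* on context teardown */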

/* Actually unmap memory for the vma */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	unsigned size = vma->node.size;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (!aspace)
		return 0;

	/*
	 * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
	 * a lock across map/unmap which is also used in the job_run()
	 * path, as this can cause deadlock in job_run() vs shrinker/
	 * reclaim.
	 *
	 * Revisit this if we can come up with a scheme to pre-alloc pages
	 * for the pgtable in map/unmap ops.
	 */
	ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

	if (ret) {
		vma->mapped = false;
	}

	return ret;
}
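
Two differences from the v5.4 msm_gem_map_vma() are worth noting: sizes and iovas are now in bytes rather than pages (drm_mm is initialized with byte ranges below), and the inuse counter is gone, with pin accounting handled elsewhere in the driver. A hedged caller sketch (msm_obj and obj are assumptions standing in for the GEM object):

int prot = IOMMU_READ | IOMMU_WRITE;	/* from <linux/iommu.h> */
int ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);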

/* Close an iova.  Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;

	GEM_WARN_ON(vma->mapped);

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return NULL;

	vma->aspace = aspace;

	return vma;
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!aspace))
		return -EINVAL;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}
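
The lifecycle is correspondingly reshuffled: the vma is now heap-allocated by msm_gem_vma_new(), and the iova range can be constrained per allocation. A sketch with error handling elided (obj, sgt and prot are assumptions; the 0..U64_MAX range means "anywhere in the space"):

struct msm_gem_vma *vma = msm_gem_vma_new(aspace);

ret = msm_gem_vma_init(vma, obj->size, 0, U64_MAX);	/* iova + aspace kref */
ret = msm_gem_vma_map(vma, prot, sgt, obj->size);

/* ... hardware uses vma->iova ... */

msm_gem_vma_purge(vma);	/* unmap from the MMU */
msm_gem_vma_close(vma);	/* release iova, drop aspace ref */
kfree(vma);		/* nothing in this file frees it; caller owns it */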

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->va_start = va_start;
	aspace->va_size  = size;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}
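
Creation also changed shape: the caller now constructs the struct msm_mmu itself and passes byte-based VA bounds, and an ERR_PTR mmu is passed straight through via ERR_CAST() so call sites can chain constructors. A hedged sketch; the msm_iommu_new() signature with a quirks argument and the VA split below are assumptions for illustration:

static struct msm_gem_address_space *
example_create_gpu_aspace(struct device *dev)
{
	struct msm_mmu *mmu = msm_iommu_new(dev, 0);

	/* no IS_ERR() check needed; create() ERR_CASTs it for us */
	return msm_gem_address_space_create(mmu, "gpu",
			SZ_16M, 0x100000000ULL - SZ_16M);
}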