v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}

int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret)
		vma->mapped = false;

	return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}

struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name)
{
	struct msm_gem_address_space *aspace;
	u64 size = domain->geometry.aperture_end -
		domain->geometry.aperture_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_iommu_new(dev, domain);

	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}

struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
		const char *name, uint64_t va_start, uint64_t va_end)
{
	struct msm_gem_address_space *aspace;
	u64 size = va_end - va_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_gpummu_new(dev, gpu);

	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
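
For context, a minimal caller-side sketch of the v5.4 API above: reserve an iova with msm_gem_init_vma(), pin the pages with msm_gem_map_vma(), and unwind with msm_gem_unmap_vma()/msm_gem_close_vma() on failure. The function name example_bind and its error handling are illustrative assumptions, not code from the driver; in the real tree the equivalent sequence lives in msm_gem.c, and prot is built from the IOMMU_READ/IOMMU_WRITE flags in linux/iommu.h.

/*
 * Illustrative sketch only (not from the kernel tree): bind a buffer's
 * pages into an address space with the v5.4 API above.
 */
static int example_bind(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	int ret;

	/* Reserve an iova range; takes a reference on aspace */
	ret = msm_gem_init_vma(aspace, vma, npages);
	if (ret)
		return ret;

	/* Map the pages; bumps vma->inuse and sets vma->mapped */
	ret = msm_gem_map_vma(aspace, vma, IOMMU_READ | IOMMU_WRITE,
			sgt, npages);
	if (ret) {
		/* v5.4 leaves inuse elevated on a failed map, so drop it */
		msm_gem_unmap_vma(aspace, vma);
		/* Release the iova node and the aspace reference */
		msm_gem_close_vma(aspace, vma);
	}

	return ret;
}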
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}

int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
		0, range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
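
Compared with v5.4, the v5.14.15 version takes a pre-constructed msm_mmu plus an explicit va range in msm_gem_address_space_create(), adds msm_gem_address_space_get(), drops vma->inuse when a map fails, releases the aspace pid in the destroy path, and lets msm_gem_init_vma() confine the iova to [range_start, range_end]. A minimal sketch of the newer creation path follows; the names example_create/example_alloc and the SZ_16M..SZ_4G window are assumptions for illustration, not values taken from the driver.

/*
 * Illustrative sketch only (not from the kernel tree): create an
 * address space with the v5.14.15 API and allocate an unrestricted iova.
 */
static struct msm_gem_address_space *
example_create(struct device *dev, struct iommu_domain *domain)
{
	struct msm_mmu *mmu = msm_iommu_new(dev, domain);

	/* create() checks IS_ERR(mmu) itself and propagates the error */
	return msm_gem_address_space_create(mmu, "gpu",
			SZ_16M, SZ_4G - SZ_16M);
}

static int example_alloc(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages)
{
	/* A range of 0..U64_MAX places no restriction on the iova */
	return msm_gem_init_vma(aspace, vma, npages, 0, U64_MAX);
}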