v4.17
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <drm/drmP.h>
#include "amdgpu.h"

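/*
 * Per-manager state: "mm" is the drm_mm range allocator that hands out VRAM
 * in page-sized units, "lock" serializes access to it, and "usage"/"vis_usage"
 * track how many bytes are currently allocated in total and inside the
 * CPU-visible part of VRAM, respectively.
 */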
struct amdgpu_vram_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t usage;
	atomic64_t vis_usage;
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of VRAM
 *
 * Allocate and initialize the VRAM manager.
 */
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
				unsigned long p_size)
{
	struct amdgpu_vram_mgr *mgr;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	drm_mm_init(&mgr->mm, 0, p_size);
	spin_lock_init(&mgr->lock);
	man->priv = mgr;
	return 0;
}
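
/*
 * Note that TTM works in units of pages here: p_size and man->size are page
 * counts and place->fpfn/lpfn below are page frame numbers, so every drm_mm
 * node stores its start and size in pages as well, hence the << PAGE_SHIFT
 * conversions wherever byte values are needed.
 */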

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the VRAM manager.
 */
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;
	return 0;
}

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu device structure
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
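
/*
 * Illustrative example: with 256 MiB of visible VRAM and 4 KiB pages, a node
 * covering pages [64512, 66560) spans the 252 MiB..260 MiB range, so only the
 * first 4 MiB of it are CPU accessible and the function returns 4 MiB.
 */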

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	uint64_t usage = 0, vis_usage = 0;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
	    amdgpu_vram_page_split == -1) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
				     mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; i < num_nodes; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;
		unsigned long start;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		usage += nodes[i].size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);

		/* Calculate a virtual BO start address to easily check if
		 * everything is CPU accessible.
		 */
		start = nodes[i].start + nodes[i].size;
		if (start > mem->num_pages)
			start -= mem->num_pages;
		else
			start = 0;
		mem->start = max(mem->start, start);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	atomic64_add(usage, &mgr->usage);
	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);

	kfree(nodes);
	return r == -ENOSPC ? 0 : r;
}
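
/*
 * Note that running out of VRAM is not treated as a hard error here:
 * returning 0 while leaving mem->mm_node unset tells TTM that the request
 * simply did not fit, so it can try eviction or a different placement,
 * whereas any other error code is reported as a real failure.
 */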

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
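
/*
 * The accounting above reads nodes->size after drm_mm_remove_node(); that is
 * fine because removal only detaches the node from the allocator and does not
 * clear its start or size, and the node memory itself stays owned by the
 * caller until the kfree() at the end.
 */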

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

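/*
 * TTM drives the manager exclusively through the function table below:
 * init/takedown when the VRAM memory type is set up and torn down,
 * get_node/put_node for every buffer object that is placed into or leaves
 * VRAM, and debug whenever TTM or the driver wants to dump the allocator
 * state.
 */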
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
	.init		= amdgpu_vram_mgr_init,
	.takedown	= amdgpu_vram_mgr_fini,
	.get_node	= amdgpu_vram_mgr_new,
	.put_node	= amdgpu_vram_mgr_del,
	.debug		= amdgpu_vram_mgr_debug
};

v5.4
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include "amdgpu.h"

struct amdgpu_vram_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t usage;
	atomic64_t vis_usage;
};

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of visible
 * VRAM currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
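
/*
 * These attributes are created on the GPU's device node, so once the driver
 * is loaded they show up in sysfs, typically under /sys/class/drm/cardN/device/.
 * A minimal userspace sketch for reading one of them is shown below; the
 * card0 path is only an illustrative assumption and depends on the system.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *path =
 *			"/sys/class/drm/card0/device/mem_info_vram_used";
 *		unsigned long long used;
 *		FILE *f = fopen(path, "r");
 *
 *		if (!f) {
 *			perror(path);
 *			return 1;
 *		}
 *		if (fscanf(f, "%llu", &used) != 1) {
 *			fclose(f);
 *			return 1;
 *		}
 *		fclose(f);
 *		printf("VRAM used: %llu MiB\n", used >> 20);
 *		return 0;
 *	}
 */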

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of VRAM
 *
 * Allocate and initialize the VRAM manager.
 */
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
				unsigned long p_size)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr;
	int ret;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	drm_mm_init(&mgr->mm, 0, p_size);
	spin_lock_init(&mgr->lock);
	man->priv = mgr;

	/* Add the four VRAM-related sysfs files */
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vram_total\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vram_used\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
		return ret;
	}

	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the VRAM manager.
 */
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;
	device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
	device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
	device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
	device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
	return 0;
}

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu device structure
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_mem_reg *mem = &bo->tbo.mem;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;
	u64 usage;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
		usage += amdgpu_vram_mgr_vis_size(adev, nodes);

	return usage;
}

/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_mem_reg to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}
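
/*
 * Because the resulting mem->start is the maximum of node->start + node->size
 * - mem->num_pages (clamped to zero) over all nodes, the virtual range
 * [mem->start, mem->start + mem->num_pages) ends at or after the end of every
 * node, so a single check of that range against the visible VRAM size is
 * enough to decide whether the whole scattered BO is CPU accessible.
 */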

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	uint64_t vis_usage = 0, mem_bytes;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	/* bail out quickly if there's likely not enough VRAM for this BO */
	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
	if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
		atomic64_sub(mem_bytes, &mgr->usage);
		mem->mm_node = NULL;
		return 0;
	}

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_node = (2UL << (20UL - PAGE_SHIFT));
#endif
		pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOMEM;
	}

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; pages_left >= pages_per_node; ++i) {
		unsigned long pages = rounddown_pow_of_two(pages_left);

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
						pages_per_node, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			break;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}

	for (; pages_left; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);
	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

	kvfree(nodes);
	return r == -ENOSPC ? 0 : r;
}
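
/*
 * The allocation runs in two passes: the first loop grabs power-of-two sized
 * chunks aligned to pages_per_node as long as at least pages_per_node pages
 * are still missing, and the second loop fills in whatever is left with
 * smaller nodes. The full BO size was already charged to mgr->usage up front,
 * which is why the early bail-out, the ENOMEM path and the error path all
 * subtract it again, while a successful allocation keeps the charge until
 * amdgpu_vram_mgr_del() releases it node by node.
 */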

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(mem->mm_node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
	.init		= amdgpu_vram_mgr_init,
	.takedown	= amdgpu_vram_mgr_fini,
	.get_node	= amdgpu_vram_mgr_new,
	.put_node	= amdgpu_vram_mgr_del,
	.debug		= amdgpu_vram_mgr_debug
};