drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c (Linux v5.4):
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

struct vmwgfx_gmrid_man {
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
};

static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int id;

	mem->mm_node = NULL;

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0)
		return (id != -ENOMEM ? 0 : id);

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto nospace;
	}

	mem->mm_node = gman;
	mem->start = id;
	mem->num_pages = bo->num_pages;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	return 0;
}

static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
				   struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (mem->mm_node) {
		ida_free(&gman->gmr_ida, mem->start);
		spin_lock(&gman->lock);
		gman->used_gmr_pages -= mem->num_pages;
		spin_unlock(&gman->lock);
		mem->mm_node = NULL;
	}
}

static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
			      unsigned long p_size)
{
	struct vmw_private *dev_priv =
		container_of(man->bdev, struct vmw_private, bdev);
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(!gman))
		return -ENOMEM;

	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);

	switch (p_size) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	man->priv = (void *) gman;
	return 0;
}

static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (gman) {
		ida_destroy(&gman->gmr_ida);
		kfree(gman);
	}
	return 0;
}

static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
				struct drm_printer *printer)
{
	drm_printf(printer, "No debug info available for the GMR id manager\n");
}

const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
	.init = vmw_gmrid_man_init,
	.takedown = vmw_gmrid_man_takedown,
	.get_node = vmw_gmrid_man_get_node,
	.put_node = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c (Linux v4.10.11), the older version of the same file; GMR id allocation here still uses the ida_pre_get()/ida_get_new() retry loop that the v5.4 code above replaces with ida_alloc_max()/ida_free():
 
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

struct vmwgfx_gmrid_man {
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
};

static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int ret = 0;
	int id;

	mem->mm_node = NULL;

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto out_err_locked;
	}

	do {
		spin_unlock(&gman->lock);
		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
			ret = -ENOMEM;
			goto out_err;
		}
		spin_lock(&gman->lock);

		ret = ida_get_new(&gman->gmr_ida, &id);
		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
			ida_remove(&gman->gmr_ida, id);
			ret = 0;
			goto out_err_locked;
		}
	} while (ret == -EAGAIN);

	if (likely(ret == 0)) {
		mem->mm_node = gman;
		mem->start = id;
		mem->num_pages = bo->num_pages;
	} else
		goto out_err_locked;

	spin_unlock(&gman->lock);
	return 0;

out_err:
	spin_lock(&gman->lock);
out_err_locked:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	return ret;
}

static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
				   struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (mem->mm_node) {
		spin_lock(&gman->lock);
		ida_remove(&gman->gmr_ida, mem->start);
		gman->used_gmr_pages -= mem->num_pages;
		spin_unlock(&gman->lock);
		mem->mm_node = NULL;
	}
}

static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
			      unsigned long p_size)
{
	struct vmw_private *dev_priv =
		container_of(man->bdev, struct vmw_private, bdev);
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(gman == NULL))
		return -ENOMEM;

	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);

	switch (p_size) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	man->priv = (void *) gman;
	return 0;
}

static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (gman) {
		ida_destroy(&gman->gmr_ida);
		kfree(gman);
	}
	return 0;
}

static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
				const char *prefix)
{
	printk(KERN_INFO "%s: No debug info available for the GMR "
	       "id manager.\n", prefix);
}

const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
	vmw_gmrid_man_init,
	vmw_gmrid_man_takedown,
	vmw_gmrid_man_get_node,
	vmw_gmrid_man_put_node,
	vmw_gmrid_man_debug
};
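
For context, neither version of the file calls these functions directly: TTM invokes them through the vmw_gmrid_manager_func table once the driver registers that table for a memory type. The sketch below shows what that wiring looks like, assuming the TTM API of these kernel versions (the init_mem_type callback in ttm_bo_driver and ttm_bo_init_mm()). The function names example_init_mem_type and example_setup_gmr_placement, and the caching-flag choices, are illustrative assumptions rather than excerpts from the vmwgfx driver; only the man->func assignment and the fact that the memory type doubles as the p_size argument are taken from how vmw_gmrid_man_init() above is written.

/* Illustrative sketch only; assumes the same includes as the file above. */

/* Candidate init_mem_type callback: route GMR/MOB placements to the id manager. */
static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	switch (type) {
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/* Allocations in this placement go through vmw_gmrid_man_get_node(). */
		man->func = &vmw_gmrid_manager_func;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * At device-load time the placement is initialized. Note the quirk visible in
 * vmw_gmrid_man_init() above: the memory type itself is passed as "p_size",
 * and the init callback switches on it to pick GMR vs. MOB limits.
 */
static int example_setup_gmr_placement(struct vmw_private *dev_priv)
{
	return ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, VMW_PL_GMR);
}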