v3.1 (drivers/gpu/drm/ttm/ttm_bo_manager.c)
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/*
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	do {
		/*
		 * Preallocate node memory outside the spinlock, so the
		 * atomic allocation below cannot sleep while it is held.
		 */
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			spin_unlock(&rman->lock);
			/* No space: mem->mm_node stays NULL and the
			 * caller may fall back to eviction. */
			return 0;
		}
		/*
		 * This can still fail if the preallocated nodes were
		 * consumed by a concurrent allocator; retry if so.
		 */
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		spin_unlock(&rman->lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}
 88
 89static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 90				struct ttm_mem_reg *mem)
 91{
 92	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 93
 94	if (mem->mm_node) {
 95		spin_lock(&rman->lock);
 96		drm_mm_put_block(mem->mm_node);
 97		spin_unlock(&rman->lock);
 
 
 98		mem->mm_node = NULL;
 99	}
100}
101
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		kfree(rman);
		return ret;
	}

	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	ttm_bo_man_init,
	ttm_bo_man_takedown,
	ttm_bo_man_get_node,
	ttm_bo_man_put_node,
	ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
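
For context, a driver enables this manager by pointing a memory type's function table at ttm_bo_manager_func. A minimal sketch against the v3.1-era API, assuming a hypothetical driver (my_init_mem_type and vram_size are illustrative names, not from this file; the man->func assignment and the ttm_bo_init_mm() call follow in-tree drivers of that era):

/*
 * Hedged sketch (v3.1-era API): hooking the range manager up to a
 * VRAM-like memory type from a driver's init_mem_type() callback.
 */
static int my_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			    struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

The memory type is then sized in pages with ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT), whose p_size argument ends up in ttm_bo_man_init() above.
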
v4.17 (drivers/gpu/drm/ttm/ttm_bo_manager.c)
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/*
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn;
	int ret;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	/* Nodes are allocated up front here, so unlike the v3.1 version
	 * no preallocate-and-retry loop is needed. */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
	ret = drm_mm_insert_node_in_range(mm, node,
					  mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	/* As in v3.1, "no space" is reported by returning 0 with
	 * mem->mm_node left NULL; the caller may then evict. */
	return 0;
}
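
The insert mode above is selected from the placement flags: TTM_PL_FLAG_TOPDOWN switches the best-fit search to DRM_MM_INSERT_HIGH, packing buffers from the top of the range. A minimal sketch of such a placement, assuming a VRAM buffer (the exact flag set a real driver needs, e.g. the caching bits, may differ):

/*
 * Hypothetical placement requesting a top-down VRAM allocation.
 * fpfn/lpfn of 0 mean "no range restriction"; get_node() above then
 * clamps lpfn to man->size.
 */
static const struct ttm_place vram_topdown_place = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_TOPDOWN,
};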

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     struct drm_printer *printer)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
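
Because the v4.17 debug hook takes a struct drm_printer rather than a prefix string, the caller decides where the drm_mm table is printed. A short sketch, assuming man points at an initialized ttm_mem_type_manager; TTM core itself does essentially this in ttm_bo_mem_space_debug() (TTM_PFX comes from ttm_module.h, already included above):

#include <drm/drm_print.h>

/* Hypothetical helper: dump the range manager's allocation table to
 * the kernel debug log. */
static void my_dump_man(struct ttm_mem_type_manager *man)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	man->func->debug(man, &p);
}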