v4.6
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "i915_gem_batch_pool.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
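
/*
 * For orientation only (the struct below lives in i915_gem_batch_pool.h, not
 * in this file): the pool is assumed to look roughly like this sketch, i.e.
 * one free-list per size bucket, which is why the loops in this file iterate
 * over ARRAY_SIZE(pool->cache_list).
 *
 *	struct i915_gem_batch_pool {
 *		struct drm_device *dev;
 *		struct list_head cache_list[4];
 *	};
 */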

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @dev: the drm device
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct drm_device *dev,
			      struct i915_gem_batch_pool *pool)
{
	int n;

	pool->dev = dev;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		while (!list_empty(&pool->cache_list[n])) {
			struct drm_i915_gem_object *obj =
				list_first_entry(&pool->cache_list[n],
						 struct drm_i915_gem_object,
						 batch_pool_link);

			list_del(&obj->batch_pool_link);
			drm_gem_object_unreference(&obj->base);
		}
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;
	struct list_head *list;
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (tmp->active)
			break;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_link);
			drm_gem_object_unreference(&tmp->base);
			continue;
		}

		if (tmp->base.size >= size) {
			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		int ret;

		obj = i915_gem_alloc_object(pool->dev, size);
		if (obj == NULL)
			return ERR_PTR(-ENOMEM);

		ret = i915_gem_object_get_pages(obj);
		if (ret)
			return ERR_PTR(ret);

		obj->madv = I915_MADV_DONTNEED;
	}

	list_move_tail(&obj->batch_pool_link, list);
	i915_gem_object_pin_pages(obj);
	return obj;
}
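
The pool API in this version is used roughly as sketched below. This is an illustrative fragment rather than driver code: shadow_batch_pool and batch_len are hypothetical names, dev is the drm_device, and struct_mutex is assumed to be held around the pool calls as the kernel-doc above requires.

	struct i915_gem_batch_pool shadow_batch_pool;	/* hypothetical pool */
	struct drm_i915_gem_object *shadow;

	i915_gem_batch_pool_init(dev, &shadow_batch_pool);	/* once, at setup */

	shadow = i915_gem_batch_pool_get(&shadow_batch_pool, batch_len);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... copy and parse the user batch into the kernel-owned buffer ... */

	/* i915_gem_batch_pool_get() returned the pages pinned; drop that pin */
	i915_gem_object_unpin_pages(shadow);

	i915_gem_batch_pool_fini(&shadow_batch_pool);	/* once, at teardown */

Objects handed back this way stay on the pool's per-size list and are reused by later i915_gem_batch_pool_get() calls once they become inactive.
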
v4.17
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "i915_gem_batch_pool.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
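
/*
 * For orientation only (again assumed from i915_gem_batch_pool.h, not part
 * of this file): in this version the pool is tied to an engine rather than
 * to the drm_device, and each intel_engine_cs is expected to embed one such
 * pool as engine->batch_pool, roughly:
 *
 *	struct i915_gem_batch_pool {
 *		struct intel_engine_cs *engine;
 *		struct list_head cache_list[4];
 *	};
 */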

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @engine: the associated request submission engine
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
			      struct i915_gem_batch_pool *pool)
{
	int n;

	pool->engine = engine;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;

		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			__i915_gem_object_release_unless_active(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle, clear the array of shared
			 * fences before we add a new request. Although we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
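
To make the bucketing above concrete (assuming 4 KiB pages): a one-page request gives fls(1) - 1 = 0, a two-page request gives fls(2) - 1 = 1, and eight pages or more land in the last bucket, with the obj->base.size check in the loop still guaranteeing the returned object is large enough. Usage has the same shape as in v4.6, except that the pool is now per engine. A minimal sketch, assuming the engine-embedded pool and a hypothetical batch_len, with struct_mutex held:

	struct drm_i915_gem_object *shadow;

	shadow = i915_gem_batch_pool_get(&engine->batch_pool, batch_len);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... fill and submit the shadow batch ... */

	/* drop the pin taken by the pool once the copy has been submitted */
	i915_gem_object_unpin_pages(shadow);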