v6.2: drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

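A note on the math above: fls() returns the 1-based index of the most significant set bit, so sizes round down to a power-of-two page count and anything of eight pages or more is clamped into the last bucket. Below is a minimal, runnable userspace sketch of the same computation, assuming 4 KiB pages and using GCC's __builtin_clzl() as a stand-in for the kernel's fls(); the names here are hypothetical, not part of the driver.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12	/* assume 4 KiB pages */
#define NUM_BUCKETS	4	/* mirrors pool->cache_list[] */

/* Stand-in for the kernel's fls(): 1-based index of the top set bit */
static int fls_standin(unsigned long x)
{
	return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
}

static int bucket_index(size_t sz)
{
	int n = fls_standin(sz >> PAGE_SHIFT) - 1;

	if (n >= NUM_BUCKETS)
		n = NUM_BUCKETS - 1;
	return n;
}

int main(void)
{
	size_t sizes[] = { 4096, 8192, 16384, 32768, 1 << 20 };
	size_t i;

	/* 1, 2, 4 and 8+ page objects land in buckets 0..3 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8zu bytes -> bucket %d\n", sizes[i], bucket_index(sizes[i]));
	return 0;
}
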
static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				/* Chain claimed nodes so they can be freed after unlock */
				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	/* Free the claimed nodes outside the spinlock */
	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

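The reaper above and the lookup in intel_gt_get_buffer_pool() below may race for the same node, so node->age doubles as an ownership flag: zero means claimed, nonzero is the jiffies timestamp at which the node went idle. Here is a minimal, runnable userspace sketch of that claim protocol, using C11 atomics in place of the kernel's xchg()/cmpxchg(); struct node and the two claim helpers are illustrative, not driver code.

#include <stdatomic.h>
#include <stdio.h>

/* age == 0 means claimed/active; nonzero is the idle timestamp */
struct node {
	_Atomic unsigned long age;
};

/* Reaper: swap in 0 unconditionally; a nonzero old value means we won */
static int claim_for_free(struct node *n)
{
	return atomic_exchange(&n->age, 0) != 0;
}

/* Lookup: claim only if age still matches the value read under RCU */
static int claim_for_reuse(struct node *n, unsigned long seen)
{
	return atomic_compare_exchange_strong(&n->age, &seen, 0);
}

int main(void)
{
	struct node n = { .age = 1000 };
	unsigned long seen = atomic_load(&n.age);

	printf("reuse claim: %d\n", claim_for_reuse(&n, seen)); /* 1: won */
	printf("free claim:  %d\n", claim_for_free(&n));        /* 0: already taken */
	return 0;
}
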
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	if (node->pinned) {
		i915_gem_object_unpin_pages(node->obj);

		/* Return this object to the shrinker pool */
		i915_gem_object_make_purgeable(node->obj);
		node->pinned = false;
	}

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}

void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
	assert_object_held(node->obj);

	if (node->pinned)
		return;

	__i915_gem_object_pin_pages(node->obj);
	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);
	node->pinned = true;
}

static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
	    enum i915_map_type type)
{
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	node->pinned = false;
	i915_active_init(&node->active, NULL, pool_retire, 0);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->type = type;
	node->obj = obj;
	return node;
}

struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
			 enum i915_map_type type)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		if (node->type != type)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		/* Claim the node by zeroing its age before unlinking it */
		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	/* The walk completed without a match: create a new node */
	if (&node->link == list) {
		node = node_create(pool, size, type);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

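For context, a typical caller (the execbuffer command parser is one user) takes a node, pins it under the object lock, and drops its reference when done; pool_retire() then recycles the node into the cache once the tracked work completes. The sketch below is illustrative only: it assumes the usual i915 GEM object helpers and intel_gt_buffer_pool_put() from the companion header, and it elides real error handling and request tracking.

static int use_pool_buffer(struct intel_gt *gt, size_t len)
{
	struct intel_gt_buffer_pool_node *node;
	void *vaddr;
	int err;

	/* Reuse a cached object of at least len bytes, or create one */
	node = intel_gt_get_buffer_pool(gt, len, I915_MAP_WB);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = i915_gem_object_lock_interruptible(node->obj, NULL);
	if (err)
		goto out_put;

	/* Pin the pages and hide the object from the shrinker */
	intel_gt_buffer_pool_mark_used(node);

	vaddr = i915_gem_object_pin_map(node->obj, node->type);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_unlock;
	}

	/* ... fill the buffer and submit work tracked by node->active ... */

	i915_gem_object_unpin_map(node->obj);
out_unlock:
	i915_gem_object_unlock(node->obj);
out_put:
	/* Releases node->active; pool_retire() runs once the node idles */
	intel_gt_buffer_pool_put(node);
	return err;
}
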
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		/* keep == 0: free every idle node regardless of age */
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
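
The v6.9.4 revision of the same file follows. The pool logic is unchanged; the visible difference is that the deferred-free work is queued on the driver's own gt->i915->unordered_wq via queue_delayed_work() instead of on the system workqueue via schedule_delayed_work().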
v6.9.4: drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);

	if (pool_free_older_than(pool, HZ))
		queue_delayed_work(gt->i915->unordered_wq, &pool->work,
				   round_jiffies_up_relative(HZ));
}

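Compared with v6.2, pool_free_work() and pool_retire() now queue onto a driver-private workqueue rather than the shared system one, which keeps i915's deferred work identifiable and off system_wq. A minimal sketch of that pattern, assuming a module with its own queue; the my_* names are hypothetical:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/timer.h>

static struct workqueue_struct *my_wq;
static struct delayed_work my_work;

static void my_work_fn(struct work_struct *wrk)
{
	/* ... periodic housekeeping ... */

	/* Re-arm on the private queue, rounded to batch timer wakeups */
	queue_delayed_work(my_wq, &my_work, round_jiffies_up_relative(HZ));
}

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&my_work, my_work_fn);
	queue_delayed_work(my_wq, &my_work, round_jiffies_up_relative(HZ));
	return 0;
}

static void __exit my_exit(void)
{
	cancel_delayed_work_sync(&my_work);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
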
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	if (node->pinned) {
		i915_gem_object_unpin_pages(node->obj);

		/* Return this object to the shrinker pool */
		i915_gem_object_make_purgeable(node->obj);
		node->pinned = false;
	}

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	queue_delayed_work(gt->i915->unordered_wq, &pool->work,
			   round_jiffies_up_relative(HZ));
}

void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
	assert_object_held(node->obj);

	if (node->pinned)
		return;

	__i915_gem_object_pin_pages(node->obj);
	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);
	node->pinned = true;
}

static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
	    enum i915_map_type type)
{
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	node->pinned = false;
	i915_active_init(&node->active, NULL, pool_retire, 0);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->type = type;
	node->obj = obj;
	return node;
}

struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
			 enum i915_map_type type)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		if (node->type != type)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	if (&node->link == list) {
		node = node_create(pool, size, type);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}