/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
/* Map a pool back to the engine that embeds it as its ->pool member. */
static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
{
	return container_of(pool, struct intel_engine_cs, pool);
}
17
18static struct list_head *
19bucket_for_size(struct intel_engine_pool *pool, size_t sz)
20{
21 int n;
22
23 /*
24 * Compute a power-of-two bucket, but throw everything greater than
25 * 16KiB into the same bucket: i.e. the buckets hold objects of
26 * (1 page, 2 pages, 4 pages, 8+ pages).
27 */
28 n = fls(sz >> PAGE_SHIFT) - 1;
29 if (n >= ARRAY_SIZE(pool->cache_list))
30 n = ARRAY_SIZE(pool->cache_list) - 1;
31
32 return &pool->cache_list[n];
33}
34
/*
 * Release a pool node: drop the reference on its backing object, tear
 * down its i915_active tracker and free the node. Callers are
 * responsible for having unlinked the node from any bucket list first.
 */
static void node_free(struct intel_engine_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree(node);
}
41
/*
 * First-use callback for the node's i915_active: prepare a (possibly
 * recycled) buffer for reuse by pinning its backing pages and taking it
 * out of the shrinker's reach until pool_retire() runs.
 */
static int pool_active(struct i915_active *ref)
{
	struct intel_engine_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	/*
	 * Opportunistically reset the reservation object: adding a NULL
	 * exclusive fence presumably discards the stale fences left over
	 * from the buffer's previous user (see dma_resv_add_excl_fence()).
	 * If the trylock fails, someone else still holds the resv and we
	 * simply skip the cleanup - it is a best-effort optimisation only.
	 */
	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}
63
/*
 * Retirement callback for the node's i915_active: the GPU has finished
 * with the buffer, so unpin it, let the shrinker reclaim it if memory
 * is tight, and return it to its size bucket for reuse.
 */
static void pool_retire(struct i915_active *ref)
{
	struct intel_engine_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_engine_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	/* The pool is only ever manipulated while its engine is awake */
	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	/* irqsave: retirement may run from interrupt context */
	spin_lock_irqsave(&pool->lock, flags);
	list_add(&node->link, list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
83
84static struct intel_engine_pool_node *
85node_create(struct intel_engine_pool *pool, size_t sz)
86{
87 struct intel_engine_cs *engine = to_engine(pool);
88 struct intel_engine_pool_node *node;
89 struct drm_i915_gem_object *obj;
90
91 node = kmalloc(sizeof(*node),
92 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
93 if (!node)
94 return ERR_PTR(-ENOMEM);
95
96 node->pool = pool;
97 i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
98
99 obj = i915_gem_object_create_internal(engine->i915, sz);
100 if (IS_ERR(obj)) {
101 i915_active_fini(&node->active);
102 kfree(node);
103 return ERR_CAST(obj);
104 }
105
106 i915_gem_object_set_readonly(obj);
107
108 node->obj = obj;
109 return node;
110}
111
/*
 * intel_engine_pool_get - acquire a buffer of at least @size bytes from
 * the engine's pool, creating a fresh one if no cached buffer is large
 * enough. On success the node's i915_active has been acquired; the node
 * is returned to its bucket when the active retires (see pool_retire()).
 * Returns the node or an ERR_PTR.
 */
struct intel_engine_pool_node *
intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
{
	struct intel_engine_pool_node *node;
	struct list_head *list;
	unsigned long flags;
	int ret;

	/* Only valid while the engine (and hence the pool) is awake */
	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(node, list, link) {
		if (node->obj->base.size < size)
			continue;
		/* Found a big-enough cached buffer; claim it */
		list_del(&node->link);
		break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * If the walk ran to completion, @node is the list_for_each_entry
	 * sentinel (the head embedded in the bucket), i.e. nothing suitable
	 * was cached - allocate a new node instead.
	 */
	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		/* Whether cached or fresh, the node is unlinked: free it */
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}
148
149void intel_engine_pool_init(struct intel_engine_pool *pool)
150{
151 int n;
152
153 spin_lock_init(&pool->lock);
154 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
155 INIT_LIST_HEAD(&pool->cache_list[n]);
156}
157
158void intel_engine_pool_park(struct intel_engine_pool *pool)
159{
160 int n;
161
162 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
163 struct list_head *list = &pool->cache_list[n];
164 struct intel_engine_pool_node *node, *nn;
165
166 list_for_each_entry_safe(node, nn, list, link)
167 node_free(node);
168
169 INIT_LIST_HEAD(list);
170 }
171}
172
173void intel_engine_pool_fini(struct intel_engine_pool *pool)
174{
175 int n;
176
177 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
178 GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
179}