1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2016 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include "msm_drv.h"
8#include "msm_gem.h"
9#include "msm_gpu.h"
10#include "msm_gpu_trace.h"
11
12/* Default disabled for now until it has some more testing on the different
13 * iommu combinations that can be paired with the driver:
14 */
15bool enable_eviction = false;
16MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
17module_param(enable_eviction, bool, 0600);
18
19static bool can_swap(void)
20{
21 return enable_eviction && get_nr_swap_pages() > 0;
22}
23
24static unsigned long
25msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
26{
27 struct msm_drm_private *priv =
28 container_of(shrinker, struct msm_drm_private, shrinker);
29 unsigned count = priv->shrinkable_count;
30
31 if (can_swap())
32 count += priv->evictable_count;
33
34 return count;
35}
36
37static bool
38purge(struct msm_gem_object *msm_obj)
39{
40 if (!is_purgeable(msm_obj))
41 return false;
42
43 /*
44 * This will move the obj out of still_in_list to
45 * the purged list
46 */
47 msm_gem_purge(&msm_obj->base);
48
49 return true;
50}
51
52static bool
53evict(struct msm_gem_object *msm_obj)
54{
55 if (is_unevictable(msm_obj))
56 return false;
57
58 msm_gem_evict(&msm_obj->base);
59
60 return true;
61}
62
/*
 * Walk @list under priv->mm_lock, applying @shrink to each object until
 * either @nr_to_scan pages have been reclaimed or the list is exhausted.
 * Each visited object is parked on a private still_in_list and spliced
 * back onto @list at the end, so no object is examined twice per call.
 *
 * Returns the number of pages reclaimed.
 */
static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
		bool (*shrink)(struct msm_gem_object *msm_obj))
{
	unsigned freed = 0;
	struct list_head still_in_list;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < nr_to_scan) {
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
				list, typeof(*msm_obj), mm_list);

		/* List exhausted before reaching the target: */
		if (!msm_obj)
			break;

		list_move_tail(&msm_obj->mm_list, &still_in_list);

		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgeable)
		 */

		mutex_unlock(&priv->mm_lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!msm_gem_trylock(&msm_obj->base))
			goto tail;

		if (shrink(msm_obj))
			freed += msm_obj->base.size >> PAGE_SHIFT;

		msm_gem_unlock(&msm_obj->base);

tail:
		/* Drop our temporary ref and re-take the lock for the next
		 * iteration (or for the final splice below):
		 */
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	}

	/* Return everything we looked at (but didn't free) to @list: */
	list_splice_tail(&still_in_list, list);
	mutex_unlock(&priv->mm_lock);

	return freed;
}
122
123static unsigned long
124msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
125{
126 struct msm_drm_private *priv =
127 container_of(shrinker, struct msm_drm_private, shrinker);
128 unsigned long freed;
129
130 freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
131
132 if (freed > 0)
133 trace_msm_gem_purge(freed << PAGE_SHIFT);
134
135 if (can_swap() && freed < sc->nr_to_scan) {
136 int evicted = scan(priv, sc->nr_to_scan - freed,
137 &priv->inactive_willneed, evict);
138
139 if (evicted > 0)
140 trace_msm_gem_evict(evicted << PAGE_SHIFT);
141
142 freed += evicted;
143 }
144
145 return (freed > 0) ? freed : SHRINK_STOP;
146}
147
148#ifdef CONFIG_DEBUG_FS
149unsigned long
150msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
151{
152 struct msm_drm_private *priv = dev->dev_private;
153 struct shrink_control sc = {
154 .nr_to_scan = nr_to_scan,
155 };
156 int ret;
157
158 fs_reclaim_acquire(GFP_KERNEL);
159 ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
160 fs_reclaim_release(GFP_KERNEL);
161
162 return ret;
163}
164#endif
165
/* Since we don't know any better, bail after unmapping a few objects;
 * if necessary the vmap purge notifier will be invoked again.  Seems
 * better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
171
172static bool
173vmap_shrink(struct msm_gem_object *msm_obj)
174{
175 if (!is_vunmapable(msm_obj))
176 return false;
177
178 msm_gem_vunmap(&msm_obj->base);
179
180 return true;
181}
182
183static int
184msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
185{
186 struct msm_drm_private *priv =
187 container_of(nb, struct msm_drm_private, vmap_notifier);
188 struct list_head *mm_lists[] = {
189 &priv->inactive_dontneed,
190 &priv->inactive_willneed,
191 priv->gpu ? &priv->gpu->active_list : NULL,
192 NULL,
193 };
194 unsigned idx, unmapped = 0;
195
196 for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
197 unmapped += scan(priv, vmap_shrink_limit - unmapped,
198 mm_lists[idx], vmap_shrink);
199 }
200
201 *(unsigned long *)ptr += unmapped;
202
203 if (unmapped > 0)
204 trace_msm_gem_purge_vmaps(unmapped);
205
206 return NOTIFY_DONE;
207}
208
209/**
210 * msm_gem_shrinker_init - Initialize msm shrinker
211 * @dev: drm device
212 *
213 * This function registers and sets up the msm shrinker.
214 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	/* Callbacks must be in place before register_shrinker() makes the
	 * shrinker visible to reclaim:
	 */
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	/* register_shrinker() can fail (e.g. -ENOMEM); warn but continue,
	 * since the driver can operate without a shrinker:
	 */
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
226
227/**
228 * msm_gem_shrinker_cleanup - Clean up msm shrinker
229 * @dev: drm device
230 *
231 * This function unregisters the msm shrinker.
232 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* NOTE(review): non-NULL nr_deferred presumably indicates that
	 * register_shrinker() succeeded in init, making cleanup a no-op
	 * otherwise — confirm against the shrinker implementation.
	 */
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2016 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include <linux/vmalloc.h>
8#include <linux/sched/mm.h>
9
10#include "msm_drv.h"
11#include "msm_gem.h"
12#include "msm_gpu.h"
13#include "msm_gpu_trace.h"
14
/* NOTE(review): the comment historically said "default disabled", but the
 * default below is now true (eviction enabled by default); it remains
 * runtime-tunable via the 0600 modparam.
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
21
22static bool can_swap(void)
23{
24 return enable_eviction && get_nr_swap_pages() > 0;
25}
26
/* May the shrinker sleep (wait on fences) in this reclaim context? */
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	/* NOTE(review): __GFP_DIRECT_RECLAIM is a component of __GFP_RECLAIM,
	 * so after the check above the mask test below is always true and this
	 * reduces to "direct reclaim allowed" — confirm whether a narrower
	 * flag (e.g. __GFP_FS/__GFP_IO) was intended.
	 */
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
33
34static unsigned long
35msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
36{
37 struct msm_drm_private *priv = shrinker->private_data;
38 unsigned count = priv->lru.dontneed.count;
39
40 if (can_swap())
41 count += priv->lru.willneed.count;
42
43 return count;
44}
45
46static bool
47purge(struct drm_gem_object *obj)
48{
49 if (!is_purgeable(to_msm_bo(obj)))
50 return false;
51
52 if (msm_gem_active(obj))
53 return false;
54
55 msm_gem_purge(obj);
56
57 return true;
58}
59
60static bool
61evict(struct drm_gem_object *obj)
62{
63 if (is_unevictable(to_msm_bo(obj)))
64 return false;
65
66 if (msm_gem_active(obj))
67 return false;
68
69 msm_gem_evict(obj);
70
71 return true;
72}
73
74static bool
75wait_for_idle(struct drm_gem_object *obj)
76{
77 enum dma_resv_usage usage = dma_resv_usage_rw(true);
78 return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
79}
80
81static bool
82active_purge(struct drm_gem_object *obj)
83{
84 if (!wait_for_idle(obj))
85 return false;
86
87 return purge(obj);
88}
89
90static bool
91active_evict(struct drm_gem_object *obj)
92{
93 if (!wait_for_idle(obj))
94 return false;
95
96 return evict(obj);
97}
98
/*
 * Shrinker scan callback: run up to four reclaim stages in order of
 * increasing cost, stopping early once sc->nr_to_scan pages are freed.
 * Returns pages freed, or SHRINK_STOP when further calls cannot help.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
		unsigned long remaining;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		/* (freed/remaining are zero-initialized by the partial
		 * aggregate initializers, so the trace below is safe even
		 * for stages that never run)
		 */
		{ &priv->lru.dontneed, purge, true },
		{ &priv->lru.willneed, evict, can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					&stages[i].remaining,
					stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	/* Only report progress if objects were both freed and left over;
	 * otherwise tell reclaim to stop calling us:
	 */
	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
140
141#ifdef CONFIG_DEBUG_FS
/*
 * Debugfs hook to force-run the shrinker.  Returns pages freed, or
 * SHRINK_STOP if the shrinker was never allocated or freed nothing.
 */
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret = SHRINK_STOP;

	/* Pretend to be in reclaim context so lockdep can flag unsafe
	 * allocations made while the shrinker runs:
	 */
	fs_reclaim_acquire(GFP_KERNEL);
	/* The shrinker may be NULL if msm_gem_shrinker_init() failed: */
	if (priv->shrinker)
		ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
158#endif
159
/* Since we don't know any better, bail after unmapping a few objects;
 * if necessary the vmap purge notifier will be invoked again.  Seems
 * better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
165
166static bool
167vmap_shrink(struct drm_gem_object *obj)
168{
169 if (!is_vunmapable(to_msm_bo(obj)))
170 return false;
171
172 msm_gem_vunmap(obj);
173
174 return true;
175}
176
177static int
178msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
179{
180 struct msm_drm_private *priv =
181 container_of(nb, struct msm_drm_private, vmap_notifier);
182 struct drm_gem_lru *lrus[] = {
183 &priv->lru.dontneed,
184 &priv->lru.willneed,
185 &priv->lru.pinned,
186 NULL,
187 };
188 unsigned idx, unmapped = 0;
189 unsigned long remaining = 0;
190
191 for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
192 unmapped += drm_gem_lru_scan(lrus[idx],
193 vmap_shrink_limit - unmapped,
194 &remaining,
195 vmap_shrink);
196 }
197
198 *(unsigned long *)ptr += unmapped;
199
200 if (unmapped > 0)
201 trace_msm_gem_purge_vmaps(unmapped);
202
203 return NOTIFY_DONE;
204}
205
/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 *
 * Returns: 0 on success, -ENOMEM if the shrinker could not be allocated.
 */
int msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
	if (!priv->shrinker)
		return -ENOMEM;

	/* Callbacks must be in place before shrinker_register() makes the
	 * shrinker visible to reclaim:
	 */
	priv->shrinker->count_objects = msm_gem_shrinker_count;
	priv->shrinker->scan_objects = msm_gem_shrinker_scan;
	priv->shrinker->private_data = priv;

	shrinker_register(priv->shrinker);

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));

	return 0;
}
231
232/**
233 * msm_gem_shrinker_cleanup - Clean up msm shrinker
234 * @dev: drm device
235 *
236 * This function unregisters the msm shrinker.
237 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* priv->shrinker is only non-NULL when init succeeded, which makes
	 * cleanup safe even if init was never called or failed early:
	 */
	if (priv->shrinker) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		shrinker_free(priv->shrinker);
	}
}