// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"

static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive().. the msm_gem code itself does
	 * not need struct_mutex, although code-paths that can trigger
	 * the shrinker are still called in code-paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably to split out a separate lock for
	 * protecting inactive_list, so that the shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}
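
/* Illustrative sketch (mirrors the callers below): the helper is always
 * paired with a conditional unlock, so struct_mutex is only dropped when
 * it was actually taken here rather than already held further up the
 * call-stack:
 *
 *	bool unlock;
 *
 *	if (!msm_gem_shrinker_lock(dev, &unlock))
 *		return 0;
 *	...
 *	if (unlock)
 *		mutex_unlock(&dev->struct_mutex);
 */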

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}
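
/* Note: this shrinker accounts in units of pages rather than objects, so
 * count_objects() and scan_objects() must agree on that unit; returning
 * SHRINK_STOP tells the core that no forward progress can be made right
 * now (here: struct_mutex is contended) and to stop calling scan.
 */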

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
			/* since we don't know any better, let's bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Eviction is enabled by default; the module param allows it to be
 * disabled while it still gets testing on the different iommu
 * combinations that can be paired with the driver:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
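
/* Usage sketch (assuming the driver is built as msm.ko): with 0600 perms,
 * root can toggle eviction at runtime via sysfs, e.g.
 *
 *	echo 0 > /sys/module/msm/parameters/enable_eviction
 */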

static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
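
/* In other words: we may only wait on the GPU (see wait_for_idle() below)
 * when reclaim was entered from a context that can sleep, i.e. kswapd or
 * an allocation that permits direct reclaim.
 */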

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}

static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}

static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}

static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);

	/* Wait up to 1000 jiffies for pending work on the object to finish: */
	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}

static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}

static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}
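
/* A rough sketch of the drm_gem_lru_scan() contract assumed above: it
 * walks the LRU, locks each object, and calls the shrink callback, which
 * returns true if it actually reclaimed the object; the accumulated
 * result is in pages, approximately:
 *
 *	freed = 0;
 *	for each obj in lru (up to nr_to_scan objects):
 *		if (shrink(obj))
 *			freed += obj->size >> PAGE_SHIFT;
 *	return freed;
 */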

#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif
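
/* The debugfs-only entry point above is meant for forcing reclaim from
 * userspace in testing; fs_reclaim_acquire()/fs_reclaim_release() make
 * lockdep treat the call like real reclaim, so locking bugs show up even
 * without actual memory pressure.
 */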

/* since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     vmap_shrink);
	}

	/* ptr points at the vmap-purge caller's progress counter; reporting
	 * that we made progress lets it retry the vmalloc allocation:
	 */
	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* shrinker.nr_deferred is only allocated once register_shrinker()
	 * has succeeded, so use it to avoid unregistering a shrinker that
	 * was never registered:
	 */
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}