// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Enabled by default, but the module param allows eviction to be disabled
 * while it gets more testing on the different iommu combinations that can
 * be paired with the driver:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

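/* May this reclaim context block (ie. sleep waiting for fences)?  Blocking
 * is only allowed for direct reclaim, and then only from kswapd or when
 * the gfp mask permits the full set of reclaim flags:
 */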
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM) == __GFP_RECLAIM;
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}

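/* Per-object reclaim helpers, called from drm_gem_lru_scan():  purge()
 * frees the backing pages of an idle MADV_DONTNEED object outright, while
 * evict() unpins the pages of an idle object so they can be swapped out
 * and faulted back in on next use.
 */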
static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}

static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}

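/* Wait briefly (a few jiffies) for pending GPU access to finish, so the
 * "active" reclaim stages can make progress without stalling reclaim
 * behind long-running work:
 */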
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);
	return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}

static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}

static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}

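/* Scan the LRUs in stages, moving on to the more expensive (and possibly
 * blocking) stages only while the cheaper ones have not yet freed the
 * requested number of objects:
 */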
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
		unsigned long remaining;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge, true },
		{ &priv->lru.willneed, evict, can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					 &stages[i].remaining,
					 stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret = SHRINK_STOP;

	fs_reclaim_acquire(GFP_KERNEL);
	if (priv->shrinker)
		ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif

/* Since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}

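/* Called via the vmap purge notifier chain when the kernel needs to
 * reclaim vmalloc address space: drop cached kernel mappings on up to
 * vmap_shrink_limit objects across all of the LRUs.
 */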
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;
	unsigned long remaining = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     &remaining,
					     vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 *
 * Returns 0 on success, or -ENOMEM if the shrinker cannot be allocated.
 */
int msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
	if (!priv->shrinker)
		return -ENOMEM;

	priv->shrinker->count_objects = msm_gem_shrinker_count;
	priv->shrinker->scan_objects = msm_gem_shrinker_scan;
	priv->shrinker->private_data = priv;

	shrinker_register(priv->shrinker);

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));

	return 0;
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		shrinker_free(priv->shrinker);
	}
}