msm_gem_shrinker.c

v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"

static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive().. the msm_gem code itself does
	 * not need struct_mutex, although codepaths that can trigger
	 * the shrinker are still called in code-paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably to split out a separate lock for
	 * protecting inactive_list, so that the shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}
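
/*
 * Illustrative (hypothetical) call chain showing why the recursive trylock
 * matters: an allocation made while struct_mutex is held can enter direct
 * reclaim and re-enter this shrinker on the same task, e.g.:
 *
 *   msm ioctl path                  acquires dev->struct_mutex
 *     object pin/page allocation    enters direct reclaim under pressure
 *       shrink_slab()
 *         msm_gem_shrinker_scan()   struct_mutex already held by this task
 *
 * A plain mutex_lock() would self-deadlock here, and a plain mutex_trylock()
 * would always skip reclaim on that path. mutex_trylock_recursive()
 * distinguishes "held by this task" (proceed, and skip the unlock) from
 * "held by another task" (bail). The helper was eventually removed from the
 * kernel once its last users, including this one, were reworked; compare the
 * v6.13.7 version below, which needs no device-global lock in the shrinker.
 */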

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
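
/* Contract with the shrinker core: .count_objects reports how many pages
 * could plausibly be freed, and the core then calls .scan_objects with
 * sc->nr_to_scan sized from that count. Returning 0 here (e.g. when the
 * trylock fails) simply skips this shrinker for the current reclaim pass.
 */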

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}
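
/* Called by the vmalloc core (we register via register_vmap_purge_notifier()
 * below) when kernel vmap address space is running low; ptr points at the
 * caller's running total of freed entries.
 */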
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
			/* since we don't know any better, let's bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}

v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Enabled by default, but kept behind a module param so that eviction can
 * be disabled if it misbehaves on a particular iommu combination paired
 * with the driver:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
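
/* With mode 0600 the parameter can be flipped at runtime by root, typically
 * via /sys/module/msm/parameters/enable_eviction, or set at boot with
 * msm.enable_eviction=<0|1> on the kernel command line.
 */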

static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

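/* Waiting on GPU fences is only safe when the allocation that triggered
 * reclaim is itself allowed to block: kswapd always may; otherwise the
 * gfp mask of the triggering allocation decides.
 */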
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}

static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}

static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}

static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);
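	/* Wait for both readers and writers to go idle, but give up after
	 * 10 jiffies so the shrinker never stalls reclaim for long behind
	 * in-flight GPU work.
	 */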
	return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}

static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}

static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
		unsigned long remaining;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					 &stages[i].remaining,
					 stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

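	/* Only claim progress if something was freed *and* there is more that
	 * could still be scanned; otherwise SHRINK_STOP ends this reclaim pass.
	 */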
	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret = SHRINK_STOP;
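	/* fs_reclaim_acquire() is a lockdep annotation that marks this as a
	 * (simulated) reclaim context, so lock usage that could deadlock
	 * against a real shrinker invocation gets flagged in testing.
	 */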
	fs_reclaim_acquire(GFP_KERNEL);
	if (priv->shrinker)
		ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif

/* since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;
	unsigned long remaining = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     &remaining,
					     vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
int msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

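	/* Since the shrinker API rework in v6.7, shrinkers are allocated
	 * dynamically: shrinker_alloc() returns NULL on failure and the
	 * object is later released with shrinker_free().
	 */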
	priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
	if (!priv->shrinker)
		return -ENOMEM;

	priv->shrinker->count_objects = msm_gem_shrinker_count;
	priv->shrinker->scan_objects = msm_gem_shrinker_scan;
	priv->shrinker->private_data = priv;

	shrinker_register(priv->shrinker);

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));

	return 0;
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		shrinker_free(priv->shrinker);
	}
}