v4.17
 
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
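
/* Try to take struct_mutex for shrinker operations.  Returns true if the
 * lock is held on return, either because it was just acquired or because
 * the current task already held it; *unlock tells the caller whether it
 * must drop the lock when done. */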
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive(): the msm_gem code itself does not
	 * need struct_mutex, although code paths that can trigger the
	 * shrinker are still called from code paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably to split out a separate lock for
	 * protecting inactive_list, so that the shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
			/* Since we don't know any better, let's bail after a
			 * few; if necessary the shrinker will be invoked
			 * again.  Seems better than unmapping *everything*.
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
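
Both callbacks above follow the generic shrinker contract from linux/shrinker.h: count_objects() reports how many pages could be reclaimed (0 when the lock cannot be taken), and scan_objects() frees up to sc->nr_to_scan of them, returning the number freed or SHRINK_STOP when no progress is possible. A minimal sketch of that contract, assuming a v4.17-era kernel with the one-argument register_shrinker(); my_cache_pages, my_count and my_scan are hypothetical names, not part of the msm driver:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

/* Hypothetical cache: just a page count protected by a spinlock. */
static DEFINE_SPINLOCK(my_cache_lock);
static unsigned long my_cache_pages = 1024;

static unsigned long
my_count(struct shrinker *s, struct shrink_control *sc)
{
	/* How much could we free?  0 means "nothing to do". */
	return READ_ONCE(my_cache_pages);
}

static unsigned long
my_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed;

	/* Like msm_gem_shrinker_lock(): never block in reclaim context. */
	if (!spin_trylock(&my_cache_lock))
		return SHRINK_STOP;

	freed = min_t(unsigned long, sc->nr_to_scan, my_cache_pages);
	my_cache_pages -= freed;	/* pretend to release the pages */
	spin_unlock(&my_cache_lock);

	return freed;
}

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects  = my_scan,
	.seeks         = DEFAULT_SEEKS,
};

static int __init my_init(void)
{
	/* v6.0+ kernels take an extra name argument here. */
	return register_shrinker(&my_shrinker);
}
module_init(my_init);

static void __exit my_exit(void)
{
	unregister_shrinker(&my_shrinker);
}
module_exit(my_exit);
MODULE_LICENSE("GPL");

The msm scan callback does the same dance with struct_mutex: a failed trylock returns SHRINK_STOP so the core stops pushing this shrinker for the current pass.
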
v6.2

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Enabled by default, but can be switched off at runtime for testing on
 * the different iommu combinations that can be paired with the driver:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}
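
/* Evicting a "willneed" buffer only helps if its backing pages have
 * somewhere to go, so the eviction stages below are gated on swap space
 * actually being available. */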

static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
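
/* __GFP_RECLAIM is __GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM, so once
 * the direct-reclaim check above has passed the second condition always
 * holds; in effect this reports whether the caller is allowed to sleep. */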

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}

static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}

static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}

static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);
	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}
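
/* dma_resv_usage_rw(true) selects both read and write fences, and the
 * 1000-jiffy timeout bounds how long a single scan can stall; a return
 * value greater than zero means the object actually went idle (zero is
 * a timeout, negative an error). */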

static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}

static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}
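
/* Returning SHRINK_STOP rather than 0 tells the shrinker core that no
 * forward progress is possible right now, so it stops calling back into
 * this shrinker for the current reclaim pass. */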

#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	int ret;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif
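
/* The fs_reclaim_acquire()/release() pair makes this debugfs-triggered
 * scan look like real reclaim to lockdep, so locking patterns that could
 * deadlock against the actual shrinker get flagged here too. */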

/* Since we don't know any better, let's bail after a few; if necessary
 * the shrinker will be invoked again.  Seems better than unmapping
 * *everything*.
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
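
Compared with v4.17, scan no longer walks a single inactive_list under struct_mutex: reclaim is a table of stages run against per-state LRUs, ordered from cheap (purge already-idle dontneed buffers) to expensive (wait for the GPU, then evict), with each stage consuming whatever is left of the sc->nr_to_scan budget. A self-contained, userspace C sketch of that table-driven pattern (the pools, counts and conditions are invented for illustration, not msm's):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a drm_gem_lru: just a count of reclaimable pages. */
struct pool { const char *name; long pages; };

/* Reclaim up to nr pages from a pool; returns how many were freed. */
static long scan_pool(struct pool *p, long nr)
{
	long freed = p->pages < nr ? p->pages : nr;
	p->pages -= freed;
	return freed;
}

static long staged_scan(long budget, bool can_swap, bool can_block)
{
	struct pool dontneed = { "dontneed", 300 };
	struct pool willneed = { "willneed", 500 };
	struct {
		struct pool *pool;
		bool cond;	/* is this stage permitted right now? */
	} stages[] = {
		/* cheap first, blocking last: */
		{ &dontneed, true },			/* purge idle */
		{ &willneed, can_swap },		/* evict idle */
		{ &dontneed, can_block },		/* wait, then purge */
		{ &willneed, can_swap && can_block },	/* wait, then evict */
	};
	long nr = budget, freed = 0;

	for (unsigned i = 0; nr > 0 && i < sizeof(stages) / sizeof(stages[0]); i++) {
		if (!stages[i].cond)
			continue;
		long got = scan_pool(stages[i].pool, nr);
		nr -= got;
		freed += got;
	}

	/* Kernel convention: report progress, or "stop calling me". */
	return freed > 0 ? freed : -1 /* i.e. SHRINK_STOP */;
}

int main(void)
{
	/* 400-page budget, swap available, but not allowed to block: */
	printf("freed %ld pages\n", staged_scan(400, true, false));
	return 0;
}

Running the example frees 300 pages from the dontneed pool and the remaining 100 from willneed, without ever reaching the blocking stages; that early exit once the budget is spent is exactly why the cheap stages come first in the kernel's table.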