v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"

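/*
 * Memory-pressure handling for the msm DRM driver: a core-MM shrinker
 * (count/scan over the purgeable GEM objects on priv->inactive_list)
 * plus a vmap purge notifier that drops kernel vmappings of inactive
 * objects.  Every callback serializes against the rest of the driver
 * with dev->struct_mutex, taken via msm_gem_shrinker_lock() below.
 */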
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive().. the msm_gem code itself does
	 * not need struct_mutex, although code-paths that can trigger
	 * the shrinker are still called in code-paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably to split out a separate lock for
	 * protecting inactive_list, so that the shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}

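/*
 * msm_gem_shrinker_count() implements the shrinker's ->count_objects()
 * hook: it reports how many pages of purgeable objects could be
 * reclaimed, which the core MM then uses to size its ->scan_objects()
 * requests.  Like the other callbacks in this file it wraps the list
 * walk in the helper above:
 *
 *	bool unlock;
 *
 *	if (!msm_gem_shrinker_lock(dev, &unlock))
 *		return 0;		// or SHRINK_STOP / NOTIFY_DONE
 *	... walk priv->inactive_list under struct_mutex ...
 *	if (unlock)
 *		mutex_unlock(&dev->struct_mutex);
 */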
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

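/*
 * ->scan_objects() does the actual reclaim: it purges purgeable objects
 * with msm_gem_purge() until roughly sc->nr_to_scan pages have been
 * freed, and returns the number of pages reclaimed.  Returning
 * SHRINK_STOP when struct_mutex cannot be taken tells the core MM to
 * stop calling into this shrinker for now.
 */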
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}

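/*
 * Callback for the vmap purge notifier chain (registered in
 * msm_gem_shrinker_init() below).  It is invoked when the kernel wants
 * to free up vmalloc address space: it vunmaps up to 15 inactive
 * objects per invocation and reports the count back through *ptr.
 */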
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
			/* since we don't know any better, let's bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*.
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
v4.10.11
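The same file as of v4.10.11 follows for comparison; it predates the SPDX tag and the explanatory locking comment, and msm_gem_purge()/msm_gem_vunmap() are called without the OBJ_LOCK_SHRINKER argument.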
 
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"

static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base);
			/* since we don't know any better, let's bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*.
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}