// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also an opportunity to simply return a slot
 * to the local caches without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * The swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in map_count, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we can possibly sleep when we search
 * for slots with scan_swap_map.
 */

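/*
 * The main entry points below are get_swap_page(), which hands out a
 * slot from the current cpu's cache (refilling it from the global pool
 * in batches), and free_swap_slot(), which parks a freed slot in the
 * current cpu's return cache until a batch can be flushed back to the
 * global pool.
 */
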
#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool	swap_slot_cache_active;
bool	swap_slot_cache_enabled;
static bool	swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
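/* Flags telling __drain_swap_slots_cache() which per-cpu arrays to drain */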
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

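/*
 * Mark the cache inactive and flush both the allocation and return
 * caches of every cpu back to the global pool.
 */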
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

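/* Mark the cache active again; the per-cpu caches refill lazily on next use. */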
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with the cpu hotplug lock held */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

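/*
 * Activate or deactivate the cache depending on how many free swap
 * pages remain relative to the per-cpu activation and deactivation
 * thresholds, and report whether the cache is currently active.
 */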
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

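/*
 * cpu hotplug "online" callback: allocate this cpu's allocation and
 * return caches, and initialize their locks on first use.
 */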
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do the allocations outside swap_slots_cache_mutex, as
	 * kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache.  The memory barrier below
	 * ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

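/*
 * Flush one cpu's cached slots back to the global pool.  @type selects
 * the allocation cache (SLOTS_CACHE), the return cache (SLOTS_CACHE_RET),
 * or both; @free_slots additionally frees the backing arrays.
 */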
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of the swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to the global pool.
	 *
	 * We cannot acquire the cpu hotplug lock here as
	 * this function can be invoked in the cpu
	 * hotplug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over currently online cpus below could miss a cpu
	 * that is being brought online but not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online.  Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

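/*
 * cpu hotplug "offline" callback: drain and free both of this cpu's
 * slot arrays.
 */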
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

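/*
 * Register the cpu hotplug callbacks on first use, then (re)enable
 * the cache if there is usable swap.
 */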
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

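/*
 * Stash a freed swap entry in this cpu's return cache; when the cache
 * is full (or unavailable), hand the entries back to the global pool
 * via swapcache_free_entries().
 */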
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to the global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in the global pool.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

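/*
 * Allocate a swap entry for @page: THP pages go straight to the global
 * pool, while order-0 pages come from the per-cpu cache when it is
 * active, with a fallback to the global pool.
 */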
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}