v3.1
 
/*
 * Quicklist support.
 *
 * Quicklists are light weight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 * 	Christoph Lameter <clameter@sgi.com>
 * 		Generalized, added support for multiple lists and
 * 		constructors / destructors.
 */
#include <linux/kernel.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

#define FRACTION_OF_NODE_MEM	16

static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;

	return max(max, min_pages);
}
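
/*
 * Worked example with illustrative numbers (not part of the original
 * source): on a node with 1,048,576 free 4 KiB pages (4 GiB) and 8 CPUs,
 * the cap computed above is 1048576 / 16 / 8 = 8192 pages, i.e. up to
 * about 32 MiB of cached pages per CPU for a given quicklist, unless
 * min_pages asks for more.
 */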

static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}

unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}
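The header comment above states the quicklist contract (a page must already be back in its defined state, zeroed by default, when it is freed), but the actual callers live in architecture code. Below is a minimal sketch of how an architecture might cache page-table pages on one quicklist, assuming the quicklist_alloc()/quicklist_free()/quicklist_trim() helpers declared in <linux/quicklist.h>; the hook names, the QUICK_PGD index, and the trim thresholds are illustrative rather than taken from any particular architecture.

/*
 * Sketch of an architecture-side user of the quicklist API above
 * (illustrative; not copied from any specific architecture).
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/quicklist.h>

#define QUICK_PGD	0	/* index of the per-CPU quicklist used for pgds */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/*
	 * Hands back an already zeroed page from the per-CPU list and
	 * only falls back to the page allocator (with __GFP_ZERO added)
	 * when the list is empty.
	 */
	return quicklist_alloc(QUICK_PGD, GFP_KERNEL, NULL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* The page must already be zeroed before it goes back on the list. */
	quicklist_free(QUICK_PGD, NULL, pgd);
}

static inline void check_pgt_cache(void)
{
	/*
	 * Trim only when more than 25 pages are cached on this CPU's
	 * list, freeing at most 16 of them back to the page allocator
	 * per call.
	 */
	quicklist_trim(QUICK_PGD, NULL, 25, 16);
}

Note how this lines up with the gfp_t of 0 passed by quicklist_trim() above: quicklist_alloc() only reaches the page allocator when the per-CPU list is empty, and trimming by definition only pulls pages that are already on the list.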
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * Quicklist support.
 *
 * Quicklists are light weight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 * 	Christoph Lameter <cl@linux.com>
 * 		Generalized, added support for multiple lists and
 * 		constructors / destructors.
 */
#include <linux/kernel.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

#define FRACTION_OF_NODE_MEM	16

static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;

	return max(max, min_pages);
}

static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}

unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}
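quicklist_total_size() is the only cross-CPU accounting helper in the file; mainline kernels of this era use it (under CONFIG_QUICKLIST) to feed the "Quicklists:" line of /proc/meminfo. The sketch below shows that kind of consumer, with the reporting context itself being illustrative.

/*
 * Illustrative consumer of quicklist_total_size(); the reporting
 * function is hypothetical, not taken from the kernel source.
 */
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/quicklist.h>

static void report_quicklist_usage(void)
{
	/*
	 * quicklist_total_size() returns a page count summed over every
	 * online CPU and all CONFIG_NR_QUICK lists; shifting by
	 * PAGE_SHIFT - 10 converts pages to kB.
	 */
	pr_info("Quicklists: %8lu kB\n",
		quicklist_total_size() << (PAGE_SHIFT - 10));
}

Because the per-CPU counters are read without any locking, the result is only a snapshot, which is all that meminfo-style reporting needs.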