v4.17
 
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig (a sketch of
 *   the Kconfig side follows the #include below).
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   the chunk size is not aligned.  percpu-km code will whine about
 *   it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>
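
/*
 * A minimal sketch of the Kconfig side referenced above, not part of
 * this file.  The exact definition below is illustrative (the real
 * symbol lives in the mm Kconfig); the point is that a UP/nommu
 * architecture picks up percpu-km simply by being !SMP:
 *
 *	config NEED_PER_CPU_KM
 *		depends on !SMP
 *		bool
 *		default y
 */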
/* pages are allocated up front in pcpu_create_chunk(); nothing to do */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	return 0;
}

/* pages are only freed when the whole chunk is destroyed */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* nada */
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_populated(chunk, 0, nr_pages, false);
	spin_unlock_irq(&pcpu_lock);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
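
/*
 * Worked example with illustrative numbers (not from the source):
 * with PAGE_SIZE = 4K and a single group of four 16K units,
 * pcpu_group_sizes[0] = 64K, so nr_pages = 16 and
 * order_base_2(16) = 4.  alloc_pages() then returns one physically
 * contiguous 64K block, and each CPU's unit sits at roughly
 * base_addr + pcpu_unit_offsets[cpu] inside it.
 */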

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

/* chunk memory comes straight from the linear mapping */
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n",
			alloc_pages - nr_pages);

	return 0;
}
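
/*
 * Worked example of the warning above, with made-up numbers: one unit
 * of 24K with 4K pages gives nr_pages = 6; roundup_pow_of_two(6) = 8,
 * so alloc_pages - nr_pages = 2 and boot prints
 * "wasting 2 pages per chunk".  Sizing units to 32K would avoid the
 * waste entirely.
 */
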
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   the chunk size is not aligned.  percpu-km code will whine about
 *   it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

/* percpu-km never maps or unmaps pages, so there is no TLB to flush */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	/* nothing */
}

/* pages are allocated up front in pcpu_create_chunk(); nothing to do */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	return 0;
}

/* pages are only freed when the whole chunk is destroyed */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* nada */
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages);

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
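
/*
 * For context, the backlink installed by the loop above is a tiny
 * helper in mm/percpu.c (quoted from memory here; treat it as a
 * sketch rather than the authoritative source):
 *
 *	static void pcpu_set_page_chunk(struct page *page,
 *					struct pcpu_chunk *pcpu)
 *	{
 *		page->index = (unsigned long)pcpu;
 *	}
 *
 * which is what lets an address be mapped back to its chunk via
 * pcpu_addr_to_page() below.
 */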

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

/* chunk memory comes straight from the linear mapping */
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n",
			alloc_pages - nr_pages);

	return 0;
}
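
/*
 * Reclaim cannot help here: a km chunk is a single high-order
 * allocation that only goes back to the page allocator whole, via
 * pcpu_destroy_chunk(), so chunks are never volunteered for reclaim.
 */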

static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
	return false;
}