arch/s390/mm/page-states.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

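/*
 * cmma_flag holds the CMMA (Collaborative Memory Management Assist)
 * mode: 0 = disabled, 1 = the ESSA page states are usable, 2 = the
 * no-dat page states are usable as well (see cmma_init below).
 */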
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);

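/*
 * Probe for the ESSA instruction (opcode 0xb9ab) with an
 * ESSA_GET_STATE request: rc is preloaded with -EOPNOTSUPP and only
 * cleared by the "la %0,0" if the instruction did not fault; a fault
 * is caught by the exception table entry and jumps past the clear.
 */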
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:     la      %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

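/*
 * Disable CMMA entirely if ESSA is not available; with facility 147
 * installed the additional no-dat page states can be used, too.
 */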
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

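/*
 * Read the current ESSA page state; the state is encoded in the low
 * six bits of the register set by ESSA_GET_STATE.
 */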
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

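/*
 * The three helpers below issue one ESSA operation per 4K page of an
 * order-sized block to put it into the unused, stable, or stable
 * no-dat state.
 */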
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

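/*
 * The mark_kernel_* walkers below flag every page that is used as a
 * kernel page table with PG_arch_1, so that cmma_init_nodat() can
 * skip those pages when marking memory stable/no-dat.
 */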
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

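/*
 * Put all pages that are neither page tables (PG_arch_1 set above)
 * nor sitting on a free list into the stable/no-dat state; only done
 * when the no-dat states are available (cmma_flag == 2).
 */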
void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

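/*
 * Page allocator hooks: hint freed pages to the host as unused and
 * make pages stable again on allocation, using the no-dat variant
 * when available.
 */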
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

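/*
 * Walk the free lists of every populated zone and switch all free
 * pages to stable or unused in one sweep; the per-cpu free pages are
 * drained first when making pages stable so the zone lists are
 * complete.
 */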
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

arch/s390/mm/page-states.c (v3.15)
 
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/setup.h>
#include <asm/ipl.h>

#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

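/*
 * This version only knows the stable and unused states, and the ESSA
 * constants are still defined locally; later kernels take them from
 * asm/page-states.h (compare the v5.4 listing above).
 */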
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

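/*
 * The ESSA availability probe is inlined here: rc starts out as
 * -EOPNOTSUPP and is cleared by the "la %0,0" only if the
 * instruction did not fault (see the EX_TABLE entry).
 */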
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	/*
	 * Disable CMM for dump, otherwise the tprot based memory
	 * detection can fail because of unstable pages.
	 */
	if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
		cmma_flag = 0;
		return;
	}
	asm volatile(
		"       .insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:     la      %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}