[Captured page header — site navigation removed. Below: arch/s390/include/asm/pgalloc.h as of Linux v6.2, followed by the same header as of v5.4.]
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999, 2000
  5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
  6 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
  7 *
  8 *  Derived from "include/asm-i386/pgalloc.h"
  9 *    Copyright (C) 1994  Linus Torvalds
 10 */
 11
 12#ifndef _S390_PGALLOC_H
 13#define _S390_PGALLOC_H
 14
 15#include <linux/threads.h>
 16#include <linux/string.h>
 17#include <linux/gfp.h>
 18#include <linux/mm.h>
 19
#define CRST_ALLOC_ORDER 2	/* CRST tables are order-2 allocations (4 contiguous pages) */

/* Region/segment (CRST) table allocation; implemented outside this header. */
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

/* pte table allocation and the PGSTE variants; implemented outside this header. */
unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
 31
 32static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 33{
 34	memset64((u64 *)crst, entry, _CRST_ENTRIES);
 35}
 36
/* Upgrade mm to enough page table levels to map addresses up to "limit"; returns 0 or a negative error. */
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
 38
 39static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
 40					     unsigned long len)
 41{
 42	int rc;
 43
 44	if (addr + len > mm->context.asce_limit &&
 45	    addr + len <= TASK_SIZE) {
 46		rc = crst_table_upgrade(mm, addr + len);
 47		if (rc)
 48			return (unsigned long) rc;
 49	}
 50	return addr;
 51}
 52
 
 
 
 53static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
 54{
 55	unsigned long *table = crst_table_alloc(mm);
 56
 57	if (table)
 58		crst_table_init(table, _REGION2_ENTRY_EMPTY);
 59	return (p4d_t *) table;
 60}
 61
 62static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 63{
 64	if (!mm_p4d_folded(mm))
 65		crst_table_free(mm, (unsigned long *) p4d);
 66}
 67
 68static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 69{
 70	unsigned long *table = crst_table_alloc(mm);
 71	if (table)
 72		crst_table_init(table, _REGION3_ENTRY_EMPTY);
 73	return (pud_t *) table;
 74}
 75
 76static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 77{
 78	if (!mm_pud_folded(mm))
 79		crst_table_free(mm, (unsigned long *) pud);
 80}
 81
 82static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 83{
 84	unsigned long *table = crst_table_alloc(mm);
 85
 86	if (!table)
 87		return NULL;
 88	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
 89	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
 90		crst_table_free(mm, table);
 91		return NULL;
 92	}
 93	return (pmd_t *) table;
 94}
 95
 96static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 97{
 98	if (mm_pmd_folded(mm))
 99		return;
100	pgtable_pmd_page_dtor(virt_to_page(pmd));
101	crst_table_free(mm, (unsigned long *) pmd);
102}
103
104static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
105{
106	set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
107}
108
109static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
110{
111	set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
112}
113
114static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
115{
116	set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
117}
118
119static inline pgd_t *pgd_alloc(struct mm_struct *mm)
120{
121	return (pgd_t *) crst_table_alloc(mm);
 
 
 
 
 
 
 
 
 
 
 
122}
123
124static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
125{
 
 
126	crst_table_free(mm, (unsigned long *) pgd);
127}
128
129static inline void pmd_populate(struct mm_struct *mm,
130				pmd_t *pmd, pgtable_t pte)
131{
132	set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
133}
134
/* Kernel page tables are populated exactly like user page tables here. */
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
136
 
 
 
/*
 * page table entry allocation/free routines.
 * All pte tables come from page_table_alloc()/page_table_free();
 * the kernel and user variants are identical, as the macros show.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
 
 
145
/* vmem helpers; prototypes only, implemented outside this header. */
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

/* base ASCE allocation/free; prototypes only, implemented outside this header. */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);
152
153#endif /* _S390_PGALLOC_H */
[v5.4 — the older version of the same header follows for comparison.]
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999, 2000
  5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
  6 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
  7 *
  8 *  Derived from "include/asm-i386/pgalloc.h"
  9 *    Copyright (C) 1994  Linus Torvalds
 10 */
 11
 12#ifndef _S390_PGALLOC_H
 13#define _S390_PGALLOC_H
 14
 15#include <linux/threads.h>
 16#include <linux/string.h>
 17#include <linux/gfp.h>
 18#include <linux/mm.h>
 19
#define CRST_ALLOC_ORDER 2	/* CRST tables are order-2 allocations (4 contiguous pages) */

/* Region/segment (CRST) table allocation; implemented outside this header. */
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

/* pte table allocation and the PGSTE variants; implemented outside this header. */
unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
 31
/* Fill every one of the _CRST_ENTRIES slots of a region/segment table with "entry". */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}
 36
/*
 * Return the empty-entry value matching the mm's top-level page table:
 * the first level that is NOT folded (checked bottom-up) decides
 * whether the top table holds segment, region-3, region-2 or
 * region-1 entries.
 */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm_pud_folded(mm))
		return _REGION3_ENTRY_EMPTY;
	if (mm_p4d_folded(mm))
		return _REGION2_ENTRY_EMPTY;
	return _REGION1_ENTRY_EMPTY;
}
 47
/* Add (upgrade) or remove (downgrade) top-level page table levels for mm. */
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *);
 50
/* Allocate a p4d table preset to empty region-2 entries; NULL on failure. */
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}
/* p4d tables are plain CRST tables; free them as such. */
#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
 
 
 
 
 
 60
/* Allocate a pud table preset to empty region-3 entries; NULL on failure. */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
/* pud tables are plain CRST tables; free them as such. */
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
 
 
 
 
 
 69
/*
 * Allocate a pmd (segment) table preset to empty entries.  The backing
 * page must also pass pgtable_pmd_page_ctor(); if that fails the table
 * is released again.  Returns NULL on any failure.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}
 83
/* Release a pmd table: undo the page ctor, then free the CRST pages. */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}
 89
/* Link a p4d table into a pgd slot as a region-1 table entry. */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}
 94
/* Link a pud table into a p4d slot as a region-2 table entry. */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}
 99
/* Link a pmd table into a pud slot as a region-3 table entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
104
/*
 * Allocate the top-level (pgd) table.  When the address space tops out
 * at _REGION3_SIZE the same table also serves as the pmd level, so the
 * pmd page constructor must succeed as well; on ctor failure the table
 * is released and NULL returned.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == _REGION3_SIZE) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}
120
/* Free a top-level table; mms limited to _REGION3_SIZE ran the pmd ctor in pgd_alloc(), so undo it first. */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == _REGION3_SIZE)
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}
127
/* Install a pte table into a pmd slot as a segment table entry. */
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

/* Kernel page tables are populated exactly like user page tables here. */
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
135
/* Recover the pte table pointer from a pmd entry by masking the value
 * down to pte-table alignment (low status bits cleared). */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
138
/*
 * page table entry allocation/free routines.
 * All pte tables come from page_table_alloc()/page_table_free();
 * the kernel and user variants are identical, as the macros show.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
147
/* Drain pending RCU-deferred table frees; implemented outside this header. */
extern void rcu_table_freelist_finish(void);

/* vmem helpers; prototypes only, implemented outside this header. */
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

/* base ASCE allocation/free; prototypes only, implemented outside this header. */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);
156
157#endif /* _S390_PGALLOC_H */