arch/s390/include/asm/pgalloc.h

v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);

static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
					     unsigned long len)
{
	int rc;

	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!mm_p4d_folded(mm))
		crst_table_free(mm, (unsigned long *) p4d);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (!mm_pud_folded(mm))
		crst_table_free(mm, (unsigned long *) pud);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t *) crst_table_alloc(mm);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */
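
For orientation, here is a minimal userspace sketch of what the CRST helpers above amount to. A CRST (combined region/segment table) is an order-2 allocation (CRST_ALLOC_ORDER), i.e. four pages holding 2048 eight-byte entries (the same 2048 that the older clear_table() call in v4.10.11 below spells out), and crst_table_init() simply stores one "empty" entry value into every slot via memset64(). The CRST_ENTRIES and FAKE_EMPTY_ENTRY constants are stand-ins for the kernel's _CRST_ENTRIES and _REGION*_ENTRY_EMPTY values; this is an illustration, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define CRST_ENTRIES 2048		/* order-2 allocation: 4 pages = 16 KB / 8 bytes per entry */
#define FAKE_EMPTY_ENTRY 0x20UL		/* stand-in for e.g. _REGION3_ENTRY_EMPTY */

/* Userspace model of crst_table_init(): fill every slot with the "empty" entry. */
static void crst_table_init_model(uint64_t *crst, uint64_t entry)
{
	for (size_t i = 0; i < CRST_ENTRIES; i++)
		crst[i] = entry;	/* memset64(crst, entry, _CRST_ENTRIES) in v6.2 */
}

int main(void)
{
	uint64_t *table = malloc(CRST_ENTRIES * sizeof(*table));

	if (!table)
		return 1;
	crst_table_init_model(table, FAKE_EMPTY_ENTRY);
	assert(table[0] == FAKE_EMPTY_ENTRY && table[CRST_ENTRIES - 1] == FAKE_EMPTY_ENTRY);
	free(table);
	return 0;
}

The p4d/pud/pmd_alloc_one() helpers above are just this pattern with a different empty-entry value per translation level (_REGION2_ENTRY_EMPTY, _REGION3_ENTRY_EMPTY, _SEGMENT_ENTRY_EMPTY).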
v4.10.11
 
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	struct addrtype { char _[256]; };
	int i;

	for (i = 0; i < n; i += 256) {
		*s = val;
		asm volatile(
			"mvc	8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);
	}
}

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
}

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *);
void crst_table_downgrade(struct mm_struct *);

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == (1UL << 31)) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == (1UL << 31))
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

#endif /* _S390_PGALLOC_H */
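
The clear_table() helper in this older version is the hand-coded predecessor of the memset64() call used in v6.2: it stores the eight-byte value into the first slot of each 256-byte chunk and then lets MVC copy 248 bytes from offset 0 to offset 8. Because MVC copies left to right one byte at a time, the overlapping copy propagates the first eight bytes across the rest of the chunk. A plain-C model of that behaviour (an illustrative sketch, not s390 code) looks like this:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Plain-C model of the old s390 clear_table(): fill n bytes starting at s
 * with the 8-byte pattern val, 256 bytes at a time.  The byte-wise copy from
 * offset 0 to offset 8 mimics the overlapping MVC, replicating the first
 * eight bytes across each 256-byte chunk.
 */
static void clear_table_model(uint64_t *s, uint64_t val, size_t n)
{
	for (size_t i = 0; i < n; i += 256) {
		unsigned char *p = (unsigned char *)s;

		s[0] = val;
		/* model the overlapping MVC: destination runs 8 bytes behind source */
		for (size_t j = 0; j < 248; j++)
			p[8 + j] = p[j];
		s += 256 / sizeof(*s);
	}
}

int main(void)
{
	uint64_t table[2048];	/* a CRST table: 2048 entries, as in crst_table_init() */

	clear_table_model(table, 0x1234567890abcdefULL, sizeof(table));
	for (size_t i = 0; i < 2048; i++)
		assert(table[i] == 0x1234567890abcdefULL);
	return 0;
}

Either way the end result matches the memset64()-based crst_table_init() in v6.2: all 2048 table entries hold the given "empty" entry value.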