v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

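/*
 * Region and segment (CRST) tables have 2048 eight-byte entries
 * (16 KB), so each one occupies four consecutive pages, hence the
 * allocation order of 2.
 */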
#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);

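/*
 * If a new mapping would end above the current ASCE limit but still
 * below TASK_SIZE, grow the address space by adding page table levels
 * via crst_table_upgrade().  Returns addr on success, or the negative
 * error code cast to unsigned long on failure.
 */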
static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
					     unsigned long len)
{
	int rc;

	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

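/*
 * The upper Linux page table levels map onto s390 hardware tables:
 * p4d entries live in a region-second table, pud entries in a
 * region-third table and pmd entries in a segment table.  Each one is
 * a full CRST table initialized with the matching "empty" entry value.
 */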
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!mm_p4d_folded(mm))
		crst_table_free(mm, (unsigned long *) p4d);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (!mm_pud_folded(mm))
		crst_table_free(mm, (unsigned long *) pud);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

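/*
 * The *_populate() helpers link a freshly allocated child table into
 * its parent by storing an entry that combines the table-type bits
 * with the physical address of the child table.
 */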
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t *) crst_table_alloc(mm);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	crst_table_free(mm, (unsigned long *) pgd);
}

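/*
 * pte tables are not CRST tables: they hold 256 entries (2 KB) and
 * come from the page_table_alloc() fragment allocator.  pmd_populate()
 * links such a table into the segment table.
 */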
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */
v4.6
 
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
extern int page_table_allocate_pgste;

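/*
 * Fill a table with a repeated 8-byte value: the first entry is
 * stored directly, the first MVC propagates it across the remaining
 * 248 bytes of the first 256-byte block, and the loop then copies
 * each following 256-byte block from the one before it.
 */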
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
		"	mvc	8(248,%0),0(%0)\n"
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}

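/* A region or segment (CRST) table has 2048 entries, i.e. 16 KB. */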
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
}

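/*
 * Pick the "empty" entry value for the top-level table based on the
 * current address space size: up to 2 GB a segment table suffices,
 * up to 4 TB a region-third table, beyond that a region-second table.
 */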
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *);
void crst_table_downgrade(struct mm_struct *);

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

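/*
 * With a 2 GB address space limit the task runs with two page table
 * levels and the "pgd" is really a segment table, so it also needs
 * the pmd page constructor/destructor used for split page table locks.
 */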
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == (1UL << 31)) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == (1UL << 31))
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

#endif /* _S390_PGALLOC_H */