/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

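/*
 * __init_new_context() - allocate a context id. Preloads the IDA and
 * retries on -EAGAIN; an id above MAX_USER_CONTEXT is released again
 * and -ENOMEM is returned. Returns the new id on success or a
 * negative errno on failure.
 */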
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);

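/*
 * init_new_context() - set up the MMU context for a freshly created mm:
 * allocate a context id, pick the default page size for the slice code,
 * and initialise the subpage-protection, coprocessor (icswx),
 * PTE-fragment and IOMMU state that the configuration enables.
 */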
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/* The old code would re-promote on fork; we don't do that when
	 * using slices, as it could cause problems promoting slices that
	 * have been forced down to 4K.
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&mm->context);
#endif
	return 0;
}

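/*
 * __destroy_context() - return a context id to the allocator so it can
 * be reused by a later mm.
 */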
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
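/*
 * destroy_pagetable_page() - drop the references still held for the
 * unused fragments of the cached PTE-fragment page; if that was the
 * last reference, destruct and free the page.
 */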
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

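/*
 * destroy_context() - tear down the MMU context of an mm that is going
 * away: clean up the IOMMU and coprocessor state, drop the cached
 * PTE-fragment page, release the context id and free the
 * subpage-protection table.
 */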
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_cleanup(&mm->context);
#endif

#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	mm->context.id = MMU_NO_CONTEXT;
}