v6.8: arch/powerpc/mm/copro_fault.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that we fortunately
 * haven't had to handle.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

	vma = lock_mm_and_find_vma(mm, ea, NULL);
	if (!vma)
		return -EFAULT;

	ret = -EFAULT;
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above,
		 * and hash should get a NOHPTE fault instead of
		 * a PROTFAULT in case fixup is needed for things
		 * like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);

	/* The fault is fully completed (including releasing mmap lock) */
	if (*flt & VM_FAULT_COMPLETED)
		return 0;

	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

out_unlock:
	mmap_read_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
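In-tree users such as the spufs and cxl drivers feed the coprocessor's faulting effective address and DSISR value straight into this function. The sketch below shows the shape of such a driver-side fault path, loosely modelled on the spufs handler; the function itself and the way mm, ea and dsisr reach it are hypothetical, and only copro_handle_mm_fault() comes from this file.

/*
 * Hypothetical driver-side fault path; only copro_handle_mm_fault()
 * is real (see above).
 */
static int example_copro_fault(struct mm_struct *mm, unsigned long ea,
			       unsigned long dsisr)
{
	vm_fault_t flt = 0;
	int ret;

	/* Fault the page in on behalf of the coprocessor. */
	ret = copro_handle_mm_fault(mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* -EFAULT or -ENOMEM: report to the context */

	if (flt & VM_FAULT_MAJOR)
		pr_devel("major fault at %#lx\n", ea);

	/* The page is now resident; the device can retry its access. */
	return 0;
}

Note that a VM_FAULT_COMPLETED result also returns 0 here, so a zero return only tells the caller that the access may be retried, not which path satisfied it.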
#ifdef CONFIG_PPC_64S_HASH_MMU
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
#endif /* CONFIG_PPC_64S_HASH_MMU */
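On hash-MMU systems a coprocessor driver can service a segment fault by asking copro_calculate_slb() for the esid/vsid pair, which is in the same format the CPU's own SLB uses, and programming it into the device. Below is a minimal sketch, loosely modelled on the cxl segment-fault path; example_copro_fault_segment() and load_device_slb() are hypothetical stand-ins for the driver entry point and the device-specific MMIO write.

/* Hypothetical segment-fault service routine for a hash-MMU coprocessor. */
static int example_copro_fault_segment(struct mm_struct *mm, u64 ea)
{
	struct copro_slb slb;

	/* Non-zero means a bad address or an unrecognised region. */
	if (copro_calculate_slb(mm, ea, &slb))
		return -EFAULT;

	load_device_slb(slb.esid, slb.vsid);	/* hypothetical MMIO write */
	return 0;
}

copro_flush_all_slbs() is the matching invalidation hook: the hash MMU code calls it when segment mappings for an mm change, so that SPU and cxl hardware drop any SLB entries built this way.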
v5.4: arch/powerpc/mm/copro_fault.c

Compared with the v6.8 version above: the mmap lock and VMA lookup are open-coded with down_read()/find_vma()/expand_stack() instead of lock_mm_and_find_vma(), handle_mm_fault() takes no pt_regs argument and does not yet do its own fault accounting (hence the maj_flt/min_flt updates here), there is no VM_FAULT_COMPLETED path, and the SLB helpers are not yet guarded by CONFIG_PPC_64S_HASH_MMU.
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that we fortunately
 * haven't had to handle.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

	down_read(&mm->mmap_sem);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;

	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}

	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above,
		 * and hash should get a NOHPTE fault instead of
		 * a PROTFAULT in case fixup is needed for things
		 * like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

out_unlock:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
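Both versions key off the same two DSISR bits: DSISR_ISSTORE distinguishes stores from loads and instruction fetches, and DSISR_PROTFAULT is only expected under the radix MMU, which is why the hash case warns on it. A hypothetical debug helper makes the decode explicit:

/* Hypothetical helper: decode the DSISR bits this handler inspects. */
static inline void copro_dsisr_debug(unsigned long ea, unsigned long dsisr)
{
	pr_devel("copro fault at %#lx: %s%s\n", ea,
		 (dsisr & DSISR_ISSTORE) ? "store" : "load/exec",
		 (dsisr & DSISR_PROTFAULT) ? ", protection fault" : "");
}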
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);