v6.2 (arch/openrisc/mm/ioremap.c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

extern int mem_init_done;

static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;

	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}

	if (ioremap_page_range(v, v + size, p,
			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr > FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i)  this code never gets called on known boards
		 *   ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}

	return vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

/**
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (early serial console does this) and will want to alloc a page
 * for its mapping.  No userspace pages will ever get allocated before memory
 * is initialized so this applies only to kernel pages.  In the event that
 * this is called before memory is initialized we allocate the page using
 * the memblock infrastructure.
 */

pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	} else {
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
	}

	return pte;
}
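
A minimal usage sketch of the ioremap()/iounmap() pair implemented above, from a driver's point of view. This is illustrative only and not part of the kernel file: the device name, base address, length, and register offset are all hypothetical, and the physical address is deliberately not page-aligned to show that ioremap() folds the sub-page offset into the cookie it returns.

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define HYP_UART_PHYS	0x90000003UL	/* hypothetical, not page-aligned */
#define HYP_UART_LEN	0x20UL		/* hypothetical register window */
#define HYP_UART_STAT	0x04		/* hypothetical status register */

static int hyp_uart_probe(void)
{
	void __iomem *regs;
	u32 status;

	/* The cookie ioremap() returns already carries the 0x3 sub-page
	 * offset, so register offsets can be added to it directly. */
	regs = ioremap(HYP_UART_PHYS, HYP_UART_LEN);
	if (!regs)
		return -ENOMEM;

	status = ioread32(regs + HYP_UART_STAT);
	(void)status;		/* a real driver would act on this */

	/* iounmap() masks the cookie with PAGE_MASK itself, so passing
	 * the offset pointer back is fine. */
	iounmap(regs);
	return 0;
}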
v5.4 (arch/openrisc/mm/ioremap.c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <asm/pgtable.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

extern int mem_init_done;

static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;

	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}

	if (ioremap_page_range(v, v + size, p,
			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr > FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i)  this code never gets called on known boards
		 *   ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}

	return vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

/**
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (early serial console does this) and will want to alloc a page
 * for its mapping.  No userspace pages will ever get allocated before memory
 * is initialized so this applies only to kernel pages.  In the event that
 * this is called before memory is initialized we allocate the page using
 * the memblock infrastructure.
 */

pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	} else {
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
	}

	return pte;
}
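
The only differences between the two versions are the include list and the iounmap() prototype (void * in v5.4 versus volatile void __iomem * in v6.2); the alignment arithmetic at the top of ioremap() is identical and is worth tracing once by hand. The sketch below replays it in standalone C, assuming OpenRISC's 8 KiB page size (PAGE_SHIFT == 13, per arch/openrisc/include/asm/page.h); the address and size are made-up values chosen to straddle a page boundary, which is why the request consumes two pages, and hence two fixmap slots on the early-boot path.

#include <stdio.h>

#define PAGE_SHIFT	13		/* assumed: 8 KiB pages on OpenRISC */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x80001fffUL;	/* hypothetical MMIO address */
	unsigned long size = 3;			/* 3 bytes, crosses a page */
	unsigned long last_addr = addr + size - 1;
	unsigned long offset = addr & ~PAGE_MASK;	/* 0x1fff */
	unsigned long p = addr & PAGE_MASK;		/* 0x80000000 */

	size = PAGE_ALIGN(last_addr + 1) - p;		/* 0x4000: two pages */
	printf("offset=%#lx base=%#lx pages=%lu\n",
	       offset, p, size >> PAGE_SHIFT);
	/* prints: offset=0x1fff base=0x80000000 pages=2 */
	return 0;
}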