v3.1 (arch/ia64/mm/ioremap.c)
 
/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>

static inline void __iomem *
__ioremap (unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	/* size is unused: early mappings always use the uncached region */
	return __ioremap(phys_addr);
}

void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Ok, go for it..
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap);

void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);

void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
	/* nothing to undo: early_ioremap() allocates no resources */
}

void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
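A note on the granule arithmetic in ioremap() above: GRANULEROUNDDOWN()/GRANULEROUNDUP() widen the requested physical range to whole IA-64 granules before EFI is asked whether write-back caching is safe for all of it. Below is a minimal stand-alone sketch of that rounding, assuming a 16 MB granule (the granule size is a kernel configuration choice on ia64, and the real macros live in asm/meminit.h).

#include <stdio.h>

#define GRANULE_SIZE	(16UL << 20)	/* assumed: ia64 granules are 16 MB or 64 MB */

/* Mirror GRANULEROUNDDOWN()/GRANULEROUNDUP() under that assumption. */
#define ROUND_DOWN(n)	((n) & ~(GRANULE_SIZE - 1))
#define ROUND_UP(n)	(((n) + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1))

int main(void)
{
	unsigned long phys_addr = 0x40fff000UL;	/* hypothetical request */
	unsigned long size = 0x2000UL;		/* 8 KB, crossing a granule boundary */
	unsigned long gran_base = ROUND_DOWN(phys_addr);
	unsigned long gran_size = ROUND_UP(phys_addr + size) - gran_base;

	/*
	 * Prints gran_base=0x40000000 gran_size=0x2000000: the 8 KB request
	 * straddles 0x41000000, so two full granules must be checked.
	 */
	printf("gran_base=%#lx gran_size=%#lx\n", gran_base, gran_size);
	return 0;
}

Because the WB check covers the whole rounded range, even a small request that straddles a granule boundary requires both granules to be write-back-capable before the region 7 identity mapping is used.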
v5.4 (arch/ia64/mm/ioremap.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>

static inline void __iomem *
__ioremap_uc(unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	u64 attr;

	/*
	 * Use the cached identity mapping when the kernel already maps
	 * this range write-back; otherwise fall back to uncached.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	return __ioremap_uc(phys_addr);
}

void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.rst.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap_uc(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Ok, go for it..
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);

void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);

void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}

void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
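For orientation, here is how a driver typically consumes these entry points through the generic ioremap()/iounmap() pair. This is a hedged sketch, not code from this file: DEV_MMIO_BASE, DEV_MMIO_SIZE, and the register offset are made-up placeholders.

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical device: base address and register offset are placeholders. */
#define DEV_MMIO_BASE	0xfed00000UL
#define DEV_MMIO_SIZE	0x1000UL
#define DEV_REG_STATUS	0x04

static int read_device_status(u32 *status)
{
	void __iomem *regs;

	/* On ia64 this resolves to the ioremap() shown above. */
	regs = ioremap(DEV_MMIO_BASE, DEV_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	*status = readl(regs + DEV_REG_STATUS);

	iounmap(regs);
	return 0;
}

On ia64 the attribute choice (cached identity mapping, uncached region, or page-table mapping) happens inside ioremap() according to the EFI memory map; the caller only sees an opaque __iomem cookie and must release it with iounmap().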