v4.6
/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map an address above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}
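
The check only bites without PAE, where a PTE holds a 32-bit physical address. As a worked example with hypothetical numbers: a 512 MiB BAR placed at base 0xF0000000 ends at 0xF0000000 + 0x20000000 = 0x110000000, past the 4 GiB boundary, so the function returns 0 and iomap_create_wc() below fails with -EINVAL.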

int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
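
These two exports are the reservation half of the interface, much as the io_mapping helpers in include/linux/io-mapping.h use them: a driver reserves a write-combining memtype for a whole physical range once, keeps the resulting pgprot_t, and releases the reservation on teardown. A minimal sketch of that pattern; bar_base, bar_size and aperture_prot are hypothetical names, not part of this file:

/* Sketch: reserve a WC memtype for a device aperture at init,
 * drop the reservation again at exit.
 */
static pgprot_t aperture_prot;

static int example_aperture_init(resource_size_t bar_base,
				 unsigned long bar_size)
{
	/* Fails on a >4 GiB range without PAE, or on a memtype conflict. */
	return iomap_create_wc(bar_base, bar_size, &aperture_prot);
}

static void example_aperture_exit(resource_size_t bar_base,
				  unsigned long bar_size)
{
	iomap_free(bar_base, bar_size);
}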

void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
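
Each CPU owns a window of KM_TYPE_NR consecutive fixmap slots, and kmap_atomic_idx_push() returns the next stacking depth within the current CPU's window, so nested atomic mappings on one CPU and concurrent mappings on other CPUs never collide. As a sketch with hypothetical values: on CPU 2 with KM_TYPE_NR == 20, the first push yields type == 0, hence idx = 0 + 20 * 2 = 40 and vaddr = __fix_to_virt(FIX_KMAP_BEGIN + 40); since fixmap addresses grow downward, the matching PTE is kmap_pte - 40.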

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate a non-WB request to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- captures the real intention of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
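
Concretely: on a system booted with PAT disabled, a caller that built prot with pgprot_writecombine() is requesting a non-WB cache mode, so the code above silently downgrades the request to UC-; the effective type then becomes WC if an MTRR covers the range as WC, plain uncached otherwise. With PAT enabled, the caller's WC prot passes through to kmap_atomic_prot_pfn() unchanged.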

void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
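
The two atomic entry points pair up into the classic non-sleeping map, poke, unmap sequence. A sketch of a caller, reusing the hypothetical aperture_prot from the earlier iomap_create_wc() sketch; pfn and offset are assumed to fall inside the reserved range:

/* Sketch: write one 32-bit word into a page of the reserved aperture. */
static void example_poke32(unsigned long pfn, unsigned long offset, u32 val)
{
	void __iomem *vaddr = iomap_atomic_prot_pfn(pfn, aperture_prot);

	/* Pagefaults and preemption are disabled here: no sleeping. */
	writel(val, vaddr + (offset & ~PAGE_MASK));
	iounmap_atomic(vaddr);
}
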
v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 2008 Ingo Molnar
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/export.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map an address above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(*prot) &= __default_kernel_pte_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
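
The masking step is the functional change in this version of iomap_create_wc(): __default_kernel_pte_mask has bits cleared at boot for PTE features the running system cannot or should not use. For example, a kernel running with page-table isolation drops _PAGE_GLOBAL here rather than installing a global TLB entry, and hardware without NX support drops _PAGE_NX, so the pgprot_t handed back to the caller is always safe to feed to set_pte().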

void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);

void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate a non-WB request to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- captures the real intention of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);