// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
			       "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set the decrypted
	 * attribute on all ioremapped memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

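/*
 * ioremap_encrypted - map memory into CPU space with the encryption bit set
 *
 * Maps the range cacheable (write-back) and forces the memory encryption
 * attribute on the resulting mapping, regardless of what the resource walk
 * in __ioremap_check_mem() reports.
 *
 * Must be freed with iounmap.
 */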
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

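/*
 * ioremap_cache - map memory into CPU space as cacheable (write-back)
 *
 * Must be freed with iounmap.
 */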
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

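/*
 * ioremap_prot - map memory into CPU space with caller-supplied protections
 *
 * Only the cache mode encoded in @prot_val is used; the rest of the mapping
 * attributes are determined by __ioremap_caller() as usual.
 *
 * Must be freed with iounmap.
 */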
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
				 (unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start = phys & PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

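/* Undo xlate_dev_mem_ptr(): unmap the page-aligned mapping created above. */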
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

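/*
 * Report whether a physical address range would be mapped encrypted by a
 * default (flag-less) memremap() of RAM.
 */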
bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

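/*
 * Page table used for the early fixmap mappings set up by
 * __early_set_fixmap(); installed by early_ioremap_init() to cover the
 * boot-time ioremap (FIX_BTMAP_*) range.
 */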
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

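/* Walk the page tables rooted at CR3 down to the PMD that covers @addr. */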
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}