/*
 * NOTE(review): page-scrape artifact ("Loading...") removed. This file is
 * two versions of an ARM <asm/fixmap.h> concatenated; because both use the
 * same _ASM_FIXMAP_H include guard, the second copy (older layout, below
 * the first #endif) is never compiled — confirm which version is intended.
 */
1#ifndef _ASM_FIXMAP_H
2#define _ASM_FIXMAP_H
3
/*
 * Fixmap virtual window: 0xffc00000 .. 0xfff00000.
 * The topmost page of the window is left unused (TOP = END - PAGE_SIZE),
 * so the last usable fixmap page ends just below FIXADDR_END.
 */
#define FIXADDR_START		0xffc00000UL
#define FIXADDR_END		0xfff00000UL
#define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
7
8#include <asm/kmap_types.h>
9#include <asm/pgtable.h>
10
/*
 * Fixmap slot indices. Each index selects one page-sized slot inside the
 * FIXADDR window (see __fix_to_virt() in asm-generic/fixmap.h).
 */
enum fixed_addresses {
	FIX_EARLYCON_MEM_BASE,			/* early console MMIO page */
	__end_of_permanent_fixed_addresses,

	/* kmap_atomic() slots: KM_TYPE_NR consecutive slots per CPU */
	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,

	/* Support writing RO kernel text via kprobes, jump labels, etc. */
	FIX_TEXT_POKE0,
	FIX_TEXT_POKE1,

	__end_of_fixmap_region,

	/*
	 * Share the kmap() region with early_ioremap(): this is guaranteed
	 * not to clash since early_ioremap() is only available before
	 * paging_init(), and kmap() only after.
	 */
#define NR_FIX_BTMAPS		32
#define FIX_BTMAPS_SLOTS	7
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)

	/* early_ioremap() slots deliberately overlap the kmap range above */
	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
	__end_of_early_ioremap_region
};
37
/*
 * Because the fixmap and early_ioremap() regions overlap, the total slot
 * count needed is the larger of the two end markers; the conditional is
 * evaluated from enum constants, so this folds to a compile-time value.
 */
static const enum fixed_addresses __end_of_fixed_addresses =
	__end_of_fixmap_region > __end_of_early_ioremap_region ?
	__end_of_fixmap_region : __end_of_early_ioremap_region;
41
/*
 * PTE flags for fixmap mappings. All of them are present, accessed,
 * dirty and never-executable (XN); variants differ only in memory type
 * and write permission.
 */
#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)

#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
#define FIXMAP_PAGE_RO		(FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)

/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
50
/* early_ioremap() reuses the regular fixmap PTE installer */
#define __early_set_fixmap	__set_fixmap

#ifdef CONFIG_MMU

/* Install the mapping for fixmap slot @idx (prot selects the PTE flags above) */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
void __init early_fixmap_init(void);

/* Generic fix_to_virt()/set_fixmap() helpers built on the definitions above */
#include <asm-generic/fixmap.h>

#else

/* !MMU: no fixmap region exists; stub keeps callers free of #ifdefs */
static inline void early_fixmap_init(void) { }

#endif
#endif
1#ifndef _ASM_FIXMAP_H
2#define _ASM_FIXMAP_H
3
4/*
5 * Nothing too fancy for now.
6 *
7 * On ARM we already have well known fixed virtual addresses imposed by
8 * the architecture such as the vector page which is located at 0xffff0000,
9 * therefore a second level page table is already allocated covering
10 * 0xfff00000 upwards.
11 *
12 * The cache flushing code in proc-xscale.S uses the virtual area between
13 * 0xfffe0000 and 0xfffeffff.
14 */
15
/* Older layout: fixmap window 0xfff00000 .. 0xfffe0000 (below proc-xscale area) */
#define FIXADDR_START		0xfff00000UL
#define FIXADDR_TOP		0xfffe0000UL
#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)

/* Every page in the window is a kmap slot; indices run 0 .. FIX_KMAP_END-1 */
#define FIX_KMAP_BEGIN		0
#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)

/* Index <-> virtual address; slots grow upward from FIXADDR_START */
#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)

/*
 * Deliberately never defined anywhere: a surviving reference to this
 * function produces a link error (see fix_to_virt() below).
 */
extern void __this_fixmap_does_not_exist(void);
27
/*
 * Translate a fixmap index into its virtual address.
 *
 * For a compile-time-constant in-range @idx the bounds check folds away
 * entirely; for an out-of-range constant the call to the never-defined
 * __this_fixmap_does_not_exist() survives and turns into a link-time
 * error instead of silently returning a bogus address.
 */
static inline unsigned long fix_to_virt(const unsigned int idx)
{
	if (idx >= FIX_KMAP_END)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
}
34
/*
 * Inverse of fix_to_virt(): map a virtual address back to its fixmap
 * index. BUGs at runtime if @vaddr lies outside the fixmap window.
 */
static inline unsigned int virt_to_fix(const unsigned long vaddr)
{
	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
	return __virt_to_fix(vaddr);
}
40
41#endif