# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2023 MediaTek Inc.
#
# Authors:
#  Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
#

import gdb
import math
from linux import utils, constants

def DIV_ROUND_UP(n, d):
    return ((n) + (d) - 1) // (d)

def test_bit(nr, addr):
    if addr.dereference() & (0x1 << nr):
        return True
    else:
        return False

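# test_bit() mirrors the kernel helper of the same name: it checks bit `nr`
# of the word that `addr` points at, e.g. test_bit(idx,
# ms['usage']['subsection_map']) as used by pfn_section_valid() below.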
class page_ops():
    ops = None

    def __init__(self):
        if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            raise gdb.GdbError('Only support CONFIG_SPARSEMEM_VMEMMAP now')
        if constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64'):
            self.ops = aarch64_page_ops()
        else:
            raise gdb.GdbError('Only support aarch64 now')
class aarch64_page_ops():
    def __init__(self):
        self.SUBSECTION_SHIFT = 21
        self.SUBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.MODULES_VSIZE = 2 * 1024 * 1024 * 1024

        if constants.LX_CONFIG_ARM64_64K_PAGES:
            self.SECTION_SIZE_BITS = 29
        else:
            self.SECTION_SIZE_BITS = 27
        self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS

        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
        if self.VA_BITS > 48:
            if constants.LX_CONFIG_ARM64_16K_PAGES:
                self.VA_BITS_MIN = 47
            else:
                self.VA_BITS_MIN = 48
            # The active VA width is 64 - TCR_EL1.T1SZ; T1SZ lives in
            # bits [21:16] of TCR_EL1.
            tcr_el1 = gdb.execute("info registers $TCR_EL1", to_string=True)
            tcr_el1 = int(tcr_el1.split()[1], 16)
            self.vabits_actual = 64 - ((tcr_el1 >> 16) & 63)
        else:
            self.VA_BITS_MIN = self.VA_BITS
            self.vabits_actual = self.VA_BITS
        self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)

        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS

        if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
            self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
        else:
            self.MAX_ORDER = 10

        self.MAX_ORDER_NR_PAGES = 1 << self.MAX_ORDER
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
        self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
        self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT

        self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
        self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))

        self.struct_page_size = utils.get_page_type().sizeof
        self.STRUCT_PAGE_MAX_SHIFT = int(math.log(self.struct_page_size, 2))

        self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
        self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
        self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE

        self.VMEMMAP_RANGE = self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET
        self.VMEMMAP_SIZE = (self.VMEMMAP_RANGE >> self.PAGE_SHIFT) * self.struct_page_size
        self.VMEMMAP_END = (-(1 * 1024 * 1024 * 1024)) & 0xffffffffffffffff
        self.VMEMMAP_START = self.VMEMMAP_END - self.VMEMMAP_SIZE

        self.VMALLOC_START = self.MODULES_END
        self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024

        self.memstart_addr = gdb.parse_and_eval("memstart_addr")
        self.PHYS_OFFSET = self.memstart_addr
        # vmemmap is biased so that vmemmap[pfn] works even when RAM does
        # not start at physical address zero.
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)

        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")

        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
            if constants.LX_CONFIG_KASAN_GENERIC:
                self.KASAN_SHADOW_SCALE_SHIFT = 3
            else:
                self.KASAN_SHADOW_SCALE_SHIFT = 4
            self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
            self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
            self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
        else:
            self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

    def SECTION_NR_TO_ROOT(self, sec):
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

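    # Worked example of the arithmetic above, assuming 4K pages
    # (PAGE_SHIFT = 12, SECTION_SIZE_BITS = 27): PAGES_PER_SECTION is
    # 1 << 15 = 32768 and PAGES_PER_SUBSECTION is 1 << 9 = 512, so each
    # 128MB section is tracked as 64 subsections of 2MB.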
    def valid_section(self, mem_section):
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False

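    # pfn_valid() mirrors the kernel's sparsemem check: the PFN must
    # round-trip through a physical address, map to an in-range section
    # that has a mem_map, and either belong to an early (boot-time)
    # section or have its subsection bit set (e.g. hotplugged memory).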
    def pfn_valid(self, pfn):
        ms = None
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def _PAGE_OFFSET(self, va):
        return (-(1 << (va))) & 0xffffffffffffffff

    def _PAGE_END(self, va):
        return (-(1 << (va - 1))) & 0xffffffffffffffff
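    # Worked example (assuming VA_BITS = 48): _PAGE_OFFSET(48) is
    # 0xffff000000000000 and _PAGE_END(48) is 0xffff800000000000, i.e. the
    # linear map occupies the lower half of the kernel VA range.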

    def kasan_reset_tag(self, addr):
        if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
            return int(addr) | (0xff << 56)
        else:
            return addr
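    # ORing with 0xff << 56 forces the top byte (the KASAN tag) back to the
    # match-all value 0xff; e.g. a tagged pointer 0x34ffff8000001000 resets
    # to 0xffffff8000001000.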

    def __is_lm_address(self, addr):
        if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
            return True
        else:
            return False

    def __lm_to_phys(self, addr):
        return addr - self.PAGE_OFFSET + self.PHYS_OFFSET

    def __kimg_to_phys(self, addr):
        return addr - self.kimage_voffset

    def __virt_to_phys_nodebug(self, va):
        untagged_va = self.kasan_reset_tag(va)
        if self.__is_lm_address(untagged_va):
            return self.__lm_to_phys(untagged_va)
        else:
            return self.__kimg_to_phys(untagged_va)

    def __virt_to_phys(self, va):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if not self.__is_lm_address(self.kasan_reset_tag(va)):
                raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
        return self.__virt_to_phys_nodebug(va)

    def virt_to_phys(self, va):
        return self.__virt_to_phys(va)

    def PFN_PHYS(self, pfn):
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET

    def __phys_to_pfn(self, pa):
        return self.PHYS_PFN(pa)

    def __pfn_to_phys(self, pfn):
        return self.PFN_PHYS(pfn)

    def __pa_symbol_nodebug(self, x):
        return self.__kimg_to_phys(x)

    def __phys_addr_symbol(self, x):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if x < self.KERNEL_START or x > self.KERNEL_END:
                raise gdb.GdbError("0x%x exceeds the kernel image range" % x)
        return self.__pa_symbol_nodebug(x)

    def __pa_symbol(self, x):
        return self.__phys_addr_symbol(x)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        return self.__phys_to_pfn(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        return self.__phys_to_pfn(self.__pa_symbol(x))

    def page_to_pfn(self, page):
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))

    def page_to_phys(self, page):
        return self.__pfn_to_phys(self.page_to_pfn(page))

    def pfn_to_page(self, pfn):
        return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())
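    # page_to_pfn() and pfn_to_page() are inverses through the vmemmap
    # array: page == &vmemmap[pfn], so the PFN falls out of struct page
    # pointer arithmetic against vmemmap.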

    def page_to_virt(self, page):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.__va(self.page_to_phys(page))
        else:
            __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
            return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)

    def virt_to_page(self, va):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.pfn_to_page(self.virt_to_pfn(va))
        else:
            __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
            addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
            return gdb.Value(addr).cast(utils.get_page_type().pointer())

    def page_address(self, page):
        return self.page_to_virt(page)

    def folio_address(self, folio):
        return self.page_address(folio['page'].address)

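# The classes below wrap the helpers as interactive gdb commands. A
# hypothetical session (addresses illustrative, not from a real target):
#   (gdb) lx-pfn_to_page 262144
#   (gdb) lx-virt_to_phys ffff000002400000
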
class LxPFN2Page(gdb.Command):
    """PFN to struct page"""

    def __init__(self):
        super(LxPFN2Page, self).__init__("lx-pfn_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        pfn = int(argv[0])
        page = page_ops().ops.pfn_to_page(pfn)
        gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))

LxPFN2Page()
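# Note that the PFN argument is parsed with int(argv[0]), i.e. as decimal
# (lx-pfn_to_kaddr below behaves the same); the address-taking commands
# parse their argument as hex.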

class LxPage2PFN(gdb.Command):
    """struct page to PFN"""

    def __init__(self):
        super(LxPage2PFN, self).__init__("lx-page_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        pfn = page_ops().ops.page_to_pfn(page)
        gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))

LxPage2PFN()

class LxPageAddress(gdb.Command):
    """struct page to linear mapping address"""

    def __init__(self):
        super(LxPageAddress, self).__init__("lx-page_address", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        addr = page_ops().ops.page_address(page)
        gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))

LxPageAddress()

class LxPage2Phys(gdb.Command):
    """struct page to physical address"""

    def __init__(self):
        super(LxPage2Phys, self).__init__("lx-page_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        phys_addr = page_ops().ops.page_to_phys(page)
        gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))

LxPage2Phys()

class LxVirt2Phys(gdb.Command):
    """virtual address to physical address"""

    def __init__(self):
        super(LxVirt2Phys, self).__init__("lx-virt_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        linear_addr = int(argv[0], 16)
        phys_addr = page_ops().ops.virt_to_phys(linear_addr)
        gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (linear_addr, phys_addr))

LxVirt2Phys()
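# e.g. (hypothetical linear-map address):
#   (gdb) lx-virt_to_phys ffff000002400000
# With CONFIG_DEBUG_VIRTUAL set, a non-linear-map address raises a GdbError
# instead of printing a translation.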

class LxVirt2Page(gdb.Command):
    """virtual address to struct page"""

    def __init__(self):
        super(LxVirt2Page, self).__init__("lx-virt_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        linear_addr = int(argv[0], 16)
        page = page_ops().ops.virt_to_page(linear_addr)
        gdb.write("virt_to_page(0x%x) = 0x%x\n" % (linear_addr, page))

LxVirt2Page()

class LxSym2PFN(gdb.Command):
    """symbol address to PFN"""

    def __init__(self):
        super(LxSym2PFN, self).__init__("lx-sym_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        sym_addr = int(argv[0], 16)
        pfn = page_ops().ops.sym_to_pfn(sym_addr)
        gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym_addr, pfn))

LxSym2PFN()

class LxPFN2Kaddr(gdb.Command):
    """PFN to kernel address"""

    def __init__(self):
        super(LxPFN2Kaddr, self).__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        pfn = int(argv[0])
        kaddr = page_ops().ops.pfn_to_kaddr(pfn)
        gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))

LxPFN2Kaddr()
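# e.g. (PFN given in decimal, as noted above):
#   (gdb) lx-pfn_to_kaddr 262144
# prints the linear-map address for that PFN, i.e. __va(pfn << PAGE_SHIFT).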