// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include <linux/moduleparam.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

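/*
 * Dump the core state only for the first hung job; writing 1 to the
 * dump_core module parameter re-arms dumping for the next hang.
 */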
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

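/*
 * Iterator over the devcoredump file being assembled: 'start' is the base
 * of the vmalloc'ed buffer, 'hdr' the next object header to fill and
 * 'data' the location for the next payload.
 */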
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};

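/* MMIO registers captured into the ETDUMP_BUF_REG section of the dump. */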
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};

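/*
 * Fill in the object header for the payload just written at iter->data and
 * advance the iterator to the next header/payload pair.
 */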
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += le32_to_cpu(hdr->file_size);
}

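/*
 * Capture the register list above; addresses in the VIVS_PM range are
 * passed through gpu_fix_power_address() before being read.
 */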
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;
	u32 read_addr;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		read_addr = etnaviv_dump_registers[i];
		if (read_addr >= VIVS_PM_POWER_CONTROLS &&
		    read_addr <= VIVS_PM_PULSE_EATER)
			read_addr = gpu_fix_power_address(gpu, read_addr);
		reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
		reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}

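/* Dump the page tables of the given MMU context. */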
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
	etnaviv_iommu_dump(mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}

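/* Copy a memory region into the dump and record its GPU virtual address. */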
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}

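/*
 * Build a devcoredump for the hanging submit: registers, MMU state, the
 * kernel ring buffer, the offending command buffer and all buffer objects
 * referenced by the submit, terminated by an ETDUMP_BUF_END marker.
 */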
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&submit->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
			mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_NOWAIT);
	if (!iter.start) {
		mutex_unlock(&submit->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&submit->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&submit->mmu_context->cmdbuf_mapping));

	mutex_unlock(&submit->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

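	/*
	 * Dump each BO: record its page addresses in the bomap and copy its
	 * contents into the file.
	 */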
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_NOWAIT);
}