/*
 * etnaviv_dump.c — devcoredump generation for the etnaviv DRM driver.
 *
 * This capture contains two snapshots of the file: the Linux v5.4
 * version immediately below, followed by the Linux v6.8 version
 * (marked further down).
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015-2018 Etnaviv Project
  4 */
  5
  6#include <linux/devcoredump.h>
  7#include <linux/moduleparam.h>
  8
  9#include "etnaviv_cmdbuf.h"
 10#include "etnaviv_dump.h"
 11#include "etnaviv_gem.h"
 12#include "etnaviv_gpu.h"
 13#include "etnaviv_mmu.h"
 14#include "etnaviv_sched.h"
 15#include "state.xml.h"
 16#include "state_hi.xml.h"
 17
/*
 * Dump arming flag: start true so the first GPU hang produces a coredump,
 * cleared by etnaviv_core_dump() after the first capture.  Writable at
 * runtime (mode 0600) via /sys/module/.../parameters/dump_core to re-arm.
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
 20
/*
 * Cursor over the coredump file being assembled: headers grow forward from
 * @start while payload data is appended at @data; etnaviv_core_dump_header()
 * advances both.
 */
struct core_dump_iterator {
	void *start;	/* base of the vmalloc'ed dump buffer */
	struct etnaviv_dump_object_header *hdr;	/* next header slot to fill */
	void *data;	/* next payload write position */
};
 26
/* MMIO register offsets snapshotted into the ETDUMP_BUF_REG section. */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
 66
 67static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
 68	u32 type, void *data_end)
 69{
 70	struct etnaviv_dump_object_header *hdr = iter->hdr;
 71
 72	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
 73	hdr->type = cpu_to_le32(type);
 74	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
 75	hdr->file_size = cpu_to_le32(data_end - iter->data);
 76
 77	iter->hdr++;
 78	iter->data += hdr->file_size;
 79}
 80
 81static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
 82	struct etnaviv_gpu *gpu)
 83{
 84	struct etnaviv_dump_registers *reg = iter->data;
 85	unsigned int i;
 
 86
 87	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
 88		reg->reg = etnaviv_dump_registers[i];
 89		reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
 
 
 
 
 90	}
 91
 92	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
 93}
 94
 95static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
 96	struct etnaviv_iommu_context *mmu, size_t mmu_size)
 97{
 98	etnaviv_iommu_dump(mmu, iter->data);
 99
100	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
101}
102
103static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
104	void *ptr, size_t size, u64 iova)
105{
106	memcpy(iter->data, ptr, size);
107
108	iter->hdr->iova = cpu_to_le64(iova);
109
110	etnaviv_core_dump_header(iter, type, iter->data + size);
111}
112
113void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
114{
115	struct etnaviv_gpu *gpu = submit->gpu;
116	struct core_dump_iterator iter;
117	struct etnaviv_gem_object *obj;
118	unsigned int n_obj, n_bomap_pages;
119	size_t file_size, mmu_size;
120	__le64 *bomap, *bomap_start;
121	int i;
122
123	/* Only catch the first event, or when manually re-armed */
124	if (!etnaviv_dump_core)
125		return;
126	etnaviv_dump_core = false;
127
128	mutex_lock(&gpu->mmu_context->lock);
129
130	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
131
132	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
133	n_obj = 5;
134	n_bomap_pages = 0;
135	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
136			sizeof(struct etnaviv_dump_registers) +
137		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;
138
139	/* Add in the active buffer objects */
140	for (i = 0; i < submit->nr_bos; i++) {
141		obj = submit->bos[i].obj;
142		file_size += obj->base.size;
143		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
144		n_obj++;
145	}
146
147	/* If we have any buffer objects, add a bomap object */
148	if (n_bomap_pages) {
149		file_size += n_bomap_pages * sizeof(__le64);
150		n_obj++;
151	}
152
153	/* Add the size of the headers */
154	file_size += sizeof(*iter.hdr) * n_obj;
155
156	/* Allocate the file in vmalloc memory, it's likely to be big */
157	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
158			       PAGE_KERNEL);
159	if (!iter.start) {
160		mutex_unlock(&gpu->mmu_context->lock);
161		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
162		return;
163	}
164
165	/* Point the data member after the headers */
166	iter.hdr = iter.start;
167	iter.data = &iter.hdr[n_obj];
168
169	memset(iter.hdr, 0, iter.data - iter.start);
170
171	etnaviv_core_dump_registers(&iter, gpu);
172	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
173	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
174			      gpu->buffer.size,
175			      etnaviv_cmdbuf_get_va(&gpu->buffer,
176					&gpu->mmu_context->cmdbuf_mapping));
177
178	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
179			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
180			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
181					&gpu->mmu_context->cmdbuf_mapping));
182
183	mutex_unlock(&gpu->mmu_context->lock);
184
185	/* Reserve space for the bomap */
186	if (n_bomap_pages) {
187		bomap_start = bomap = iter.data;
188		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
189		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
190					 bomap + n_bomap_pages);
191	} else {
192		/* Silence warning */
193		bomap_start = bomap = NULL;
194	}
195
196	for (i = 0; i < submit->nr_bos; i++) {
197		struct etnaviv_vram_mapping *vram;
198		struct page **pages;
199		void *vaddr;
200
201		obj = submit->bos[i].obj;
202		vram = submit->bos[i].mapping;
203
204		mutex_lock(&obj->lock);
205		pages = etnaviv_gem_get_pages(obj);
206		mutex_unlock(&obj->lock);
207		if (!IS_ERR(pages)) {
208			int j;
209
210			iter.hdr->data[0] = bomap - bomap_start;
211
212			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
213				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
214		}
215
216		iter.hdr->iova = cpu_to_le64(vram->iova);
217
218		vaddr = etnaviv_gem_vmap(&obj->base);
219		if (vaddr)
220			memcpy(iter.data, vaddr, obj->base.size);
221
222		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
223					 obj->base.size);
224	}
225
226	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
227
228	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
229}
/* ===== Linux v6.8 snapshot of etnaviv_dump.c follows ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015-2018 Etnaviv Project
  4 */
  5
  6#include <linux/devcoredump.h>
  7#include <linux/moduleparam.h>
  8
  9#include "etnaviv_cmdbuf.h"
 10#include "etnaviv_dump.h"
 11#include "etnaviv_gem.h"
 12#include "etnaviv_gpu.h"
 13#include "etnaviv_mmu.h"
 14#include "etnaviv_sched.h"
 15#include "state.xml.h"
 16#include "state_hi.xml.h"
 17
/*
 * Dump arming flag: start true so the first GPU hang produces a coredump,
 * cleared by etnaviv_core_dump() after the first capture.  Writable at
 * runtime (mode 0600) via /sys/module/.../parameters/dump_core to re-arm.
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
 20
/*
 * Cursor over the coredump file being assembled: headers grow forward from
 * @start while payload data is appended at @data; etnaviv_core_dump_header()
 * advances both.
 */
struct core_dump_iterator {
	void *start;	/* base of the vmalloc'ed dump buffer */
	struct etnaviv_dump_object_header *hdr;	/* next header slot to fill */
	void *data;	/* next payload write position */
};
 26
/* MMIO register offsets snapshotted into the ETDUMP_BUF_REG section. */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
 66
 67static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
 68	u32 type, void *data_end)
 69{
 70	struct etnaviv_dump_object_header *hdr = iter->hdr;
 71
 72	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
 73	hdr->type = cpu_to_le32(type);
 74	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
 75	hdr->file_size = cpu_to_le32(data_end - iter->data);
 76
 77	iter->hdr++;
 78	iter->data += le32_to_cpu(hdr->file_size);
 79}
 80
 81static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
 82	struct etnaviv_gpu *gpu)
 83{
 84	struct etnaviv_dump_registers *reg = iter->data;
 85	unsigned int i;
 86	u32 read_addr;
 87
 88	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
 89		read_addr = etnaviv_dump_registers[i];
 90		if (read_addr >= VIVS_PM_POWER_CONTROLS &&
 91		    read_addr <= VIVS_PM_PULSE_EATER)
 92			read_addr = gpu_fix_power_address(gpu, read_addr);
 93		reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
 94		reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
 95	}
 96
 97	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
 98}
 99
100static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
101	struct etnaviv_iommu_context *mmu, size_t mmu_size)
102{
103	etnaviv_iommu_dump(mmu, iter->data);
104
105	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
106}
107
108static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
109	void *ptr, size_t size, u64 iova)
110{
111	memcpy(iter->data, ptr, size);
112
113	iter->hdr->iova = cpu_to_le64(iova);
114
115	etnaviv_core_dump_header(iter, type, iter->data + size);
116}
117
/*
 * etnaviv_core_dump() - capture GPU state after a hang into a devcoredump.
 * @submit: the submit whose execution hung.
 *
 * Builds a single vmalloc'ed file containing, in order: the section
 * headers, a register snapshot, the MMU page tables, the kernel ring
 * buffer, the hanging command buffer, an optional page map (bomap) and
 * the contents of every BO referenced by the submit, terminated by an
 * end marker.  The file is handed to the devcoredump framework, which
 * exposes it to userspace and owns the memory afterwards.
 *
 * Only the first hang is captured unless the dump_core module parameter
 * is re-armed.
 */
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	/* Hold the MMU lock so the page tables can't change under us. */
	mutex_lock(&submit->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
			__GFP_NORETRY);
	if (!iter.start) {
		mutex_unlock(&submit->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	/* Zero the header area so unused header fields read as zero. */
	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&submit->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&submit->mmu_context->cmdbuf_mapping));

	/* Nothing below touches the page tables; drop the lock early. */
	mutex_unlock(&submit->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			/* data[0] = index of this BO's entries in the bomap */
			iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		/* Best effort: skip BO contents if it can't be vmapped. */
		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	/* devcoredump takes ownership of iter.start and frees it later. */
	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}