[Extraction note: code-browser navigation text removed.]
Note: this file does not exist under this path in Linux v6.8; the mvm
firmware debug-dump code appears to have been reworked and moved in later
kernels — verify against the current tree before backporting fixes.
  1/******************************************************************************
  2 *
  3 * This file is provided under a dual BSD/GPLv2 license.  When using or
  4 * redistributing this file, you may do so under either license.
  5 *
  6 * GPL LICENSE SUMMARY
  7 *
  8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 11 *
 12 * This program is free software; you can redistribute it and/or modify
 13 * it under the terms of version 2 of the GNU General Public License as
 14 * published by the Free Software Foundation.
 15 *
 16 * This program is distributed in the hope that it will be useful, but
 17 * WITHOUT ANY WARRANTY; without even the implied warranty of
 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 19 * General Public License for more details.
 20 *
 21 * You should have received a copy of the GNU General Public License
 22 * along with this program;
 23 *
 24 * The full GNU General Public License is included in this distribution
 25 * in the file called COPYING.
 26 *
 27 * Contact Information:
 28 *  Intel Linux Wireless <linuxwifi@intel.com>
 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 30 *
 31 * BSD LICENSE
 32 *
 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 35 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 36 * All rights reserved.
 37 *
 38 * Redistribution and use in source and binary forms, with or without
 39 * modification, are permitted provided that the following conditions
 40 * are met:
 41 *
 42 *  * Redistributions of source code must retain the above copyright
 43 *    notice, this list of conditions and the following disclaimer.
 44 *  * Redistributions in binary form must reproduce the above copyright
 45 *    notice, this list of conditions and the following disclaimer in
 46 *    the documentation and/or other materials provided with the
 47 *    distribution.
 48 *  * Neither the name Intel Corporation nor the names of its
 49 *    contributors may be used to endorse or promote products derived
 50 *    from this software without specific prior written permission.
 51 *
 52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 63 *
 64 *****************************************************************************/
 65#include <linux/devcoredump.h>
 66
 67#include "fw-dbg.h"
 68#include "iwl-io.h"
 69#include "mvm.h"
 70#include "iwl-prph.h"
 71#include "iwl-csr.h"
 72
 73#define RADIO_REG_MAX_READ 0x2ad
 74static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
 75				   struct iwl_fw_error_dump_data **dump_data)
 76{
 77	u8 *pos = (void *)(*dump_data)->data;
 78	unsigned long flags;
 79	int i;
 80
 81	if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
 82		return;
 83
 84	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
 85	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
 86
 87	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
 88		u32 rd_cmd = RADIO_RSP_RD_CMD;
 89
 90		rd_cmd |= i << RADIO_RSP_ADDR_POS;
 91		iwl_write_prph_no_grab(mvm->trans, RSP_RADIO_CMD, rd_cmd);
 92		*pos =  (u8)iwl_read_prph_no_grab(mvm->trans, RSP_RADIO_RDDAT);
 93
 94		pos++;
 95	}
 96
 97	*dump_data = iwl_fw_error_next_data(*dump_data);
 98
 99	iwl_trans_release_nic_access(mvm->trans, &flags);
100}
101
/*
 * iwl_mvm_dump_fifos - dump RX and TX FIFO contents into the error-dump
 * TLV stream.
 *
 * Appends one IWL_FW_ERROR_DUMP_RXF TLV per non-empty RX FIFO, one
 * IWL_FW_ERROR_DUMP_TXF TLV per non-empty TX FIFO and - when the firmware
 * advertises the extended shared-memory config capability - one
 * IWL_FW_ERROR_DUMP_INTERNAL_TXF TLV per non-empty UMAC internal TX FIFO.
 * *dump_data is advanced past every TLV that is written.
 *
 * The caller is expected to have sized the destination buffer from
 * mvm->shared_mem_cfg beforehand.  Silently returns if NIC access cannot
 * be grabbed.  NOTE(review): the prph write/read sequences below follow a
 * fixed hardware protocol (fence setup, read-modify dummy read, then the
 * auto-incrementing data reads) - do not reorder.
 */
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
			       struct iwl_fw_error_dump_data **dump_data)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	unsigned long flags;
	int i, j;

	if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
		return;

	/* Pull RXF data from all RXFs */
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
		/*
		 * Keep aside the additional offset that might be needed for
		 * next RXF (each RXF's registers sit RXF_DIFF_FROM_PREV
		 * apart in the prph space)
		 */
		u32 offset_diff = RXF_DIFF_FROM_PREV * i;

		fifo_hdr = (void *)(*dump_data)->data;
		fifo_data = (void *)fifo_hdr->data;
		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];

		/* No need to try to read the data if the length is 0 */
		if (fifo_len == 0)
			continue;

		/* Add a TLV for the RXF */
		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

		fifo_hdr->fifo_num = cpu_to_le32(i);
		fifo_hdr->available_bytes =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_D_SPACE +
							offset_diff));
		fifo_hdr->wr_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_WR_PTR +
							offset_diff));
		fifo_hdr->rd_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_RD_PTR +
							offset_diff));
		fifo_hdr->fence_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_FENCE_PTR +
							offset_diff));
		fifo_hdr->fence_mode =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_SET_FENCE_MODE +
							offset_diff));

		/* Lock fence */
		iwl_trans_write_prph(mvm->trans,
				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
		/* Set fence pointer to the same place like WR pointer */
		iwl_trans_write_prph(mvm->trans,
				     RXF_LD_WR2FENCE + offset_diff, 0x1);
		/* Set fence offset */
		iwl_trans_write_prph(mvm->trans,
				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
				     0x0);

		/* Read FIFO: each fence-increment read returns the next DWORD */
		fifo_len /= sizeof(u32); /* Size in DWORDS */
		for (j = 0; j < fifo_len; j++)
			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
							 RXF_FIFO_RD_FENCE_INC +
							 offset_diff);
		*dump_data = iwl_fw_error_next_data(*dump_data);
	}

	/* Pull TXF data from all TXFs */
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
		/* Mark the number of TXF we're pulling now */
		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);

		fifo_hdr = (void *)(*dump_data)->data;
		fifo_data = (void *)fifo_hdr->data;
		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];

		/* No need to try to read the data if the length is 0 */
		if (fifo_len == 0)
			continue;

		/* Add a TLV for the FIFO */
		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

		fifo_hdr->fifo_num = cpu_to_le32(i);
		fifo_hdr->available_bytes =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_FIFO_ITEM_CNT));
		fifo_hdr->wr_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_WR_PTR));
		fifo_hdr->rd_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_RD_PTR));
		fifo_hdr->fence_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_FENCE_PTR));
		fifo_hdr->fence_mode =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_LOCK_FENCE));

		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
				     TXF_WR_PTR);

		/* Dummy-read to advance the read pointer to the head */
		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);

		/* Read FIFO */
		fifo_len /= sizeof(u32); /* Size in DWORDS */
		for (j = 0; j < fifo_len; j++)
			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
							  TXF_READ_MODIFY_DATA);
		*dump_data = iwl_fw_error_next_data(*dump_data);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		/* Pull UMAC internal TXF data from all TXFs */
		for (i = 0;
		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
		     i++) {
			fifo_hdr = (void *)(*dump_data)->data;
			fifo_data = (void *)fifo_hdr->data;
			fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];

			/* No need to try to read the data if the length is 0 */
			if (fifo_len == 0)
				continue;

			/* Add a TLV for the internal FIFOs */
			(*dump_data)->type =
				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
			(*dump_data)->len =
				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

			fifo_hdr->fifo_num = cpu_to_le32(i);

			/*
			 * Mark the number of TXF we're pulling now - internal
			 * FIFOs are numbered after the regular TXFs
			 */
			iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
				ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));

			fifo_hdr->available_bytes =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_FIFO_ITEM_CNT));
			fifo_hdr->wr_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_WR_PTR));
			fifo_hdr->rd_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_RD_PTR));
			fifo_hdr->fence_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_FENCE_PTR));
			fifo_hdr->fence_mode =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_LOCK_FENCE));

			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
			iwl_trans_write_prph(mvm->trans,
					     TXF_CPU2_READ_MODIFY_ADDR,
					     TXF_CPU2_WR_PTR);

			/* Dummy-read to advance the read pointer to head */
			iwl_trans_read_prph(mvm->trans,
					    TXF_CPU2_READ_MODIFY_DATA);

			/* Read FIFO */
			fifo_len /= sizeof(u32); /* Size in DWORDS */
			for (j = 0; j < fifo_len; j++)
				fifo_data[j] =
					iwl_trans_read_prph(mvm->trans,
							    TXF_CPU2_READ_MODIFY_DATA);
			*dump_data = iwl_fw_error_next_data(*dump_data);
		}
	}

	iwl_trans_release_nic_access(mvm->trans, &flags);
}
288
289void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
290{
291	if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert)
292		return;
293
294	kfree(mvm->fw_dump_desc);
295	mvm->fw_dump_desc = NULL;
296}
297
#define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */

/* An inclusive range [start, end] of periphery (prph) register addresses
 * to include in the error dump.
 */
struct iwl_prph_range {
	u32 start, end;
};

/* Periphery register ranges dumped on all supported devices.
 * NOTE(review): ranges are not strictly sorted (see the 0x00a01c58 /
 * 0x00a01c7c / 0x00a01c28 entries) and some overlap slightly
 * (0x00a01db8-0x00a01df4 vs 0x00a01dc0-0x00a01dfc) - presumably
 * intentional per the hardware register map; confirm before reordering.
 */
static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};

/* Additional periphery register ranges dumped only on devices with
 * multi-queue RX support (9000 family, see mq_rx_supported check below).
 */
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
	{ .start = 0x00a05c00, .end = 0x00a05c18 },
	{ .start = 0x00a05400, .end = 0x00a056e8 },
	{ .start = 0x00a08000, .end = 0x00a098bc },
	{ .start = 0x00a02400, .end = 0x00a02758 },
};
408
409static u32 iwl_dump_prph(struct iwl_trans *trans,
410			 struct iwl_fw_error_dump_data **data,
411			 const struct iwl_prph_range *iwl_prph_dump_addr,
412			 u32 range_len)
413{
414	struct iwl_fw_error_dump_prph *prph;
415	unsigned long flags;
416	u32 prph_len = 0, i;
417
418	if (!iwl_trans_grab_nic_access(trans, &flags))
419		return 0;
420
421	for (i = 0; i < range_len; i++) {
422		/* The range includes both boundaries */
423		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
424			 iwl_prph_dump_addr[i].start + 4;
425		int reg;
426		__le32 *val;
427
428		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
429
430		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
431		(*data)->len = cpu_to_le32(sizeof(*prph) +
432					num_bytes_in_chunk);
433		prph = (void *)(*data)->data;
434		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
435		val = (void *)prph->data;
436
437		for (reg = iwl_prph_dump_addr[i].start;
438		     reg <= iwl_prph_dump_addr[i].end;
439		     reg += 4)
440			*val++ = cpu_to_le32(iwl_read_prph_no_grab(trans,
441								   reg));
442
443		*data = iwl_fw_error_next_data(*data);
444	}
445
446	iwl_trans_release_nic_access(trans, &flags);
447
448	return prph_len;
449}
450
451/*
452 * alloc_sgtable - allocates scallerlist table in the given size,
453 * fills it with pages and returns it
454 * @size: the size (in bytes) of the table
455*/
456static struct scatterlist *alloc_sgtable(int size)
457{
458	int alloc_size, nents, i;
459	struct page *new_page;
460	struct scatterlist *iter;
461	struct scatterlist *table;
462
463	nents = DIV_ROUND_UP(size, PAGE_SIZE);
464	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
465	if (!table)
466		return NULL;
467	sg_init_table(table, nents);
468	iter = table;
469	for_each_sg(table, iter, sg_nents(table), i) {
470		new_page = alloc_page(GFP_KERNEL);
471		if (!new_page) {
472			/* release all previous allocated pages in the table */
473			iter = table;
474			for_each_sg(table, iter, sg_nents(table), i) {
475				new_page = sg_page(iter);
476				if (new_page)
477					__free_page(new_page);
478			}
479			return NULL;
480		}
481		alloc_size = min_t(int, size, PAGE_SIZE);
482		size -= PAGE_SIZE;
483		sg_set_page(iter, new_page, alloc_size, 0);
484	}
485	return table;
486}
487
488void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
489{
490	struct iwl_fw_error_dump_file *dump_file;
491	struct iwl_fw_error_dump_data *dump_data;
492	struct iwl_fw_error_dump_info *dump_info;
493	struct iwl_fw_error_dump_mem *dump_mem;
494	struct iwl_fw_error_dump_trigger_desc *dump_trig;
495	struct iwl_mvm_dump_ptrs *fw_error_dump;
496	struct scatterlist *sg_dump_data;
497	u32 sram_len, sram_ofs;
498	struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
499		mvm->fw->dbg_mem_tlv;
500	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
501	u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
502	u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
503	bool monitor_dump_only = false;
504	int i;
505
506	if (!IWL_MVM_COLLECT_FW_ERR_DUMP &&
507	    !mvm->trans->dbg_dest_tlv)
508		return;
509
510	lockdep_assert_held(&mvm->mutex);
511
512	/* there's no point in fw dump if the bus is dead */
513	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
514		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
515		goto out;
516	}
517
518	if (mvm->fw_dump_trig &&
519	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
520		monitor_dump_only = true;
521
522	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
523	if (!fw_error_dump)
524		goto out;
525
526	/* SRAM - include stack CCM if driver knows the values for it */
527	if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
528		const struct fw_img *img;
529
530		img = &mvm->fw->img[mvm->cur_ucode];
531		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
532		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
533	} else {
534		sram_ofs = mvm->cfg->dccm_offset;
535		sram_len = mvm->cfg->dccm_len;
536	}
537
538	/* reading RXF/TXF sizes */
539	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
540		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
541
542		fifo_data_len = 0;
543
544		/* Count RXF size */
545		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
546			if (!mem_cfg->rxfifo_size[i])
547				continue;
548
549			/* Add header info */
550			fifo_data_len += mem_cfg->rxfifo_size[i] +
551					 sizeof(*dump_data) +
552					 sizeof(struct iwl_fw_error_dump_fifo);
553		}
554
555		for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
556			if (!mem_cfg->txfifo_size[i])
557				continue;
558
559			/* Add header info */
560			fifo_data_len += mem_cfg->txfifo_size[i] +
561					 sizeof(*dump_data) +
562					 sizeof(struct iwl_fw_error_dump_fifo);
563		}
564
565		if (fw_has_capa(&mvm->fw->ucode_capa,
566				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
567			for (i = 0;
568			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
569			     i++) {
570				if (!mem_cfg->internal_txfifo_size[i])
571					continue;
572
573				/* Add header info */
574				fifo_data_len +=
575					mem_cfg->internal_txfifo_size[i] +
576					sizeof(*dump_data) +
577					sizeof(struct iwl_fw_error_dump_fifo);
578			}
579		}
580
581		/* Make room for PRPH registers */
582		for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
583			/* The range includes both boundaries */
584			int num_bytes_in_chunk =
585				iwl_prph_dump_addr_comm[i].end -
586				iwl_prph_dump_addr_comm[i].start + 4;
587
588			prph_len += sizeof(*dump_data) +
589				sizeof(struct iwl_fw_error_dump_prph) +
590				num_bytes_in_chunk;
591		}
592
593		if (mvm->cfg->mq_rx_supported) {
594			for (i = 0; i <
595				ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
596				/* The range includes both boundaries */
597				int num_bytes_in_chunk =
598					iwl_prph_dump_addr_9000[i].end -
599					iwl_prph_dump_addr_9000[i].start + 4;
600
601				prph_len += sizeof(*dump_data) +
602					sizeof(struct iwl_fw_error_dump_prph) +
603					num_bytes_in_chunk;
604			}
605		}
606
607		if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
608			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
609	}
610
611	file_len = sizeof(*dump_file) +
612		   sizeof(*dump_data) * 2 +
613		   fifo_data_len +
614		   prph_len +
615		   radio_len +
616		   sizeof(*dump_info);
617
618	/* Make room for the SMEM, if it exists */
619	if (smem_len)
620		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
621
622	/* Make room for the secondary SRAM, if it exists */
623	if (sram2_len)
624		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
625
626	/* Make room for MEM segments */
627	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
628		if (fw_dbg_mem[i])
629			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
630				le32_to_cpu(fw_dbg_mem[i]->len);
631	}
632
633	/* Make room for fw's virtual image pages, if it exists */
634	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
635	    mvm->fw_paging_db[0].fw_paging_block)
636		file_len += mvm->num_of_paging_blk *
637			(sizeof(*dump_data) +
638			 sizeof(struct iwl_fw_error_dump_paging) +
639			 PAGING_BLOCK_SIZE);
640
641	/* If we only want a monitor dump, reset the file length */
642	if (monitor_dump_only) {
643		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
644			   sizeof(*dump_info);
645	}
646
647	/*
648	 * In 8000 HW family B-step include the ICCM (which resides separately)
649	 */
650	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
651	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
652		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
653			    IWL8260_ICCM_LEN;
654
655	if (mvm->fw_dump_desc)
656		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
657			    mvm->fw_dump_desc->len;
658
659	if (!mvm->fw->dbg_dynamic_mem)
660		file_len += sram_len + sizeof(*dump_mem);
661
662	dump_file = vzalloc(file_len);
663	if (!dump_file) {
664		kfree(fw_error_dump);
665		goto out;
666	}
667
668	fw_error_dump->op_mode_ptr = dump_file;
669
670	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
671	dump_data = (void *)dump_file->data;
672
673	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
674	dump_data->len = cpu_to_le32(sizeof(*dump_info));
675	dump_info = (void *)dump_data->data;
676	dump_info->device_family =
677		mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
678			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
679			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
680	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
681	memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
682	       sizeof(dump_info->fw_human_readable));
683	strncpy(dump_info->dev_human_readable, mvm->cfg->name,
684		sizeof(dump_info->dev_human_readable));
685	strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
686		sizeof(dump_info->bus_human_readable));
687
688	dump_data = iwl_fw_error_next_data(dump_data);
689	/* We only dump the FIFOs if the FW is in error state */
690	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
691		iwl_mvm_dump_fifos(mvm, &dump_data);
692		if (radio_len)
693			iwl_mvm_read_radio_reg(mvm, &dump_data);
694	}
695
696	if (mvm->fw_dump_desc) {
697		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
698		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
699					     mvm->fw_dump_desc->len);
700		dump_trig = (void *)dump_data->data;
701		memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
702		       sizeof(*dump_trig) + mvm->fw_dump_desc->len);
703
704		dump_data = iwl_fw_error_next_data(dump_data);
705	}
706
707	/* In case we only want monitor dump, skip to dump trasport data */
708	if (monitor_dump_only)
709		goto dump_trans_data;
710
711	if (!mvm->fw->dbg_dynamic_mem) {
712		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
713		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
714		dump_mem = (void *)dump_data->data;
715		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
716		dump_mem->offset = cpu_to_le32(sram_ofs);
717		iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
718					 sram_len);
719		dump_data = iwl_fw_error_next_data(dump_data);
720	}
721
722	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
723		if (fw_dbg_mem[i]) {
724			u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
725			u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
726
727			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
728			dump_data->len = cpu_to_le32(len +
729					sizeof(*dump_mem));
730			dump_mem = (void *)dump_data->data;
731			dump_mem->type = fw_dbg_mem[i]->data_type;
732			dump_mem->offset = cpu_to_le32(ofs);
733			iwl_trans_read_mem_bytes(mvm->trans, ofs,
734						 dump_mem->data,
735						 len);
736			dump_data = iwl_fw_error_next_data(dump_data);
737		}
738	}
739
740	if (smem_len) {
741		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
742		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
743		dump_mem = (void *)dump_data->data;
744		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
745		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
746		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
747					 dump_mem->data, smem_len);
748		dump_data = iwl_fw_error_next_data(dump_data);
749	}
750
751	if (sram2_len) {
752		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
753		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
754		dump_mem = (void *)dump_data->data;
755		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
756		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
757		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
758					 dump_mem->data, sram2_len);
759		dump_data = iwl_fw_error_next_data(dump_data);
760	}
761
762	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
763	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
764		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
765		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
766					     sizeof(*dump_mem));
767		dump_mem = (void *)dump_data->data;
768		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
769		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
770		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
771					 dump_mem->data, IWL8260_ICCM_LEN);
772		dump_data = iwl_fw_error_next_data(dump_data);
773	}
774
775	/* Dump fw's virtual image */
776	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
777	    mvm->fw_paging_db[0].fw_paging_block) {
778		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
779			struct iwl_fw_error_dump_paging *paging;
780			struct page *pages =
781				mvm->fw_paging_db[i].fw_paging_block;
782
783			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
784			dump_data->len = cpu_to_le32(sizeof(*paging) +
785						     PAGING_BLOCK_SIZE);
786			paging = (void *)dump_data->data;
787			paging->index = cpu_to_le32(i);
788			memcpy(paging->data, page_address(pages),
789			       PAGING_BLOCK_SIZE);
790			dump_data = iwl_fw_error_next_data(dump_data);
791		}
792	}
793
794	if (prph_len) {
795		iwl_dump_prph(mvm->trans, &dump_data,
796			      iwl_prph_dump_addr_comm,
797			      ARRAY_SIZE(iwl_prph_dump_addr_comm));
798
799		if (mvm->cfg->mq_rx_supported)
800			iwl_dump_prph(mvm->trans, &dump_data,
801				      iwl_prph_dump_addr_9000,
802				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
803	}
804
805dump_trans_data:
806	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
807						       mvm->fw_dump_trig);
808	fw_error_dump->op_mode_len = file_len;
809	if (fw_error_dump->trans_ptr)
810		file_len += fw_error_dump->trans_ptr->len;
811	dump_file->file_len = cpu_to_le32(file_len);
812
813	sg_dump_data = alloc_sgtable(file_len);
814	if (sg_dump_data) {
815		sg_pcopy_from_buffer(sg_dump_data,
816				     sg_nents(sg_dump_data),
817				     fw_error_dump->op_mode_ptr,
818				     fw_error_dump->op_mode_len, 0);
819		sg_pcopy_from_buffer(sg_dump_data,
820				     sg_nents(sg_dump_data),
821				     fw_error_dump->trans_ptr->data,
822				     fw_error_dump->trans_ptr->len,
823				     fw_error_dump->op_mode_len);
824		dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
825			       GFP_KERNEL);
826	}
827	vfree(fw_error_dump->op_mode_ptr);
828	vfree(fw_error_dump->trans_ptr);
829	kfree(fw_error_dump);
830
831out:
832	iwl_mvm_free_fw_dump_desc(mvm);
833	mvm->fw_dump_trig = NULL;
834	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
835}
836
/* Dump descriptor used when the firmware asserts.  Statically allocated,
 * so iwl_mvm_free_fw_dump_desc() must never kfree() it (it checks for
 * this address explicitly).
 */
const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
	.trig_desc = {
		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
	},
};
842
/*
 * iwl_mvm_fw_dbg_collect_desc - schedule collection of a firmware debug dump
 * @mvm: the mvm context
 * @desc: dump descriptor; on success ownership transfers to mvm (released
 *	later via iwl_mvm_free_fw_dump_desc()).  On failure ownership stays
 *	with the caller.
 * @trigger: optional trigger TLV; its stop_delay (in ms) delays the dump
 *	worker
 *
 * Returns 0 on success, or -EBUSY if a dump collection is already in
 * progress (DUMPING status bit already set).
 */
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
				const struct iwl_mvm_dump_desc *desc,
				const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	unsigned int delay = 0;

	if (trigger)
		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));

	/* the DUMPING bit serializes dump collection; cleared when done */
	if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
		return -EBUSY;

	/* a leftover descriptor here indicates a bug in the state machine */
	if (WARN_ON(mvm->fw_dump_desc))
		iwl_mvm_free_fw_dump_desc(mvm);

	IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
		 le32_to_cpu(desc->trig_desc.type));

	mvm->fw_dump_desc = desc;
	mvm->fw_dump_trig = trigger;

	queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);

	return 0;
}
868
869int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
870			   const char *str, size_t len,
871			   const struct iwl_fw_dbg_trigger_tlv *trigger)
872{
873	struct iwl_mvm_dump_desc *desc;
874
875	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
876	if (!desc)
877		return -ENOMEM;
878
879	desc->len = len;
880	desc->trig_desc.type = cpu_to_le32(trig);
881	memcpy(desc->trig_desc.data, str, len);
882
883	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
884}
885
/*
 * iwl_mvm_fw_dbg_collect_trig - collect debug data for a fired trigger,
 * respecting its remaining occurrence budget
 * @mvm: the mvm context
 * @trigger: the trigger TLV that fired; its occurrences counter is
 *	decremented on successful collection
 * @fmt: optional printf-style description stored with the dump (may be NULL,
 *	in which case no payload is recorded - len stays 0)
 *
 * Returns 0 on success or if the trigger's occurrences are exhausted,
 * otherwise the error from iwl_mvm_fw_dbg_collect().
 */
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
				struct iwl_fw_dbg_trigger_tlv *trigger,
				const char *fmt, ...)
{
	u16 occurrences = le16_to_cpu(trigger->occurrences);
	int ret, len = 0;
	char buf[64];

	/* trigger already fired its allotted number of times - skip */
	if (!occurrences)
		return 0;

	if (fmt) {
		va_list ap;

		/* canary byte: vsnprintf must leave this NUL if all is well */
		buf[sizeof(buf) - 1] = '\0';

		va_start(ap, fmt);
		vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);

		/* check for truncation */
		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
			buf[sizeof(buf) - 1] = '\0';

		len = strlen(buf) + 1;
	}

	/* if fmt was NULL, len is 0 and buf's contents are never read */
	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
				     trigger);

	if (ret)
		return ret;

	/* consume one occurrence only after a successful collection */
	trigger->occurrences = cpu_to_le16(occurrences - 1);
	return 0;
}
922
923static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
924{
925	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
926		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
927	else
928		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
929}
930
931int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
932{
933	u8 *ptr;
934	int ret;
935	int i;
936
937	if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
938		      "Invalid configuration %d\n", conf_id))
939		return -EINVAL;
940
941	/* EARLY START - firmware's configuration is hard coded */
942	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
943	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
944	    conf_id == FW_DBG_START_FROM_ALIVE) {
945		iwl_mvm_restart_early_start(mvm);
946		return 0;
947	}
948
949	if (!mvm->fw->dbg_conf_tlv[conf_id])
950		return -EINVAL;
951
952	if (mvm->fw_dbg_conf != FW_DBG_INVALID)
953		IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
954			 mvm->fw_dbg_conf);
955
956	/* Send all HCMDs for configuring the FW debug */
957	ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
958	for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
959		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
960
961		ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
962					   le16_to_cpu(cmd->len), cmd->data);
963		if (ret)
964			return ret;
965
966		ptr += sizeof(*cmd);
967		ptr += le16_to_cpu(cmd->len);
968	}
969
970	mvm->fw_dbg_conf = conf_id;
971
972	return 0;
973}