   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015 QLogic Corporation
   4 * Copyright (c) 2019-2021 Marvell International Ltd.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/vmalloc.h>
   9#include <linux/crc32.h>
  10#include "qed.h"
  11#include "qed_cxt.h"
  12#include "qed_hsi.h"
  13#include "qed_dbg_hsi.h"
  14#include "qed_hw.h"
  15#include "qed_mcp.h"
  16#include "qed_reg_addr.h"
  17
  18/* Memory groups enum */
  19enum mem_groups {
  20	MEM_GROUP_PXP_MEM,
  21	MEM_GROUP_DMAE_MEM,
  22	MEM_GROUP_CM_MEM,
  23	MEM_GROUP_QM_MEM,
  24	MEM_GROUP_DORQ_MEM,
  25	MEM_GROUP_BRB_RAM,
  26	MEM_GROUP_BRB_MEM,
  27	MEM_GROUP_PRS_MEM,
  28	MEM_GROUP_SDM_MEM,
  29	MEM_GROUP_PBUF,
  30	MEM_GROUP_IOR,
  31	MEM_GROUP_RAM,
  32	MEM_GROUP_BTB_RAM,
  33	MEM_GROUP_RDIF_CTX,
  34	MEM_GROUP_TDIF_CTX,
  35	MEM_GROUP_CFC_MEM,
  36	MEM_GROUP_CONN_CFC_MEM,
  37	MEM_GROUP_CAU_PI,
  38	MEM_GROUP_CAU_MEM,
  39	MEM_GROUP_CAU_MEM_EXT,
  40	MEM_GROUP_PXP_ILT,
  41	MEM_GROUP_MULD_MEM,
  42	MEM_GROUP_BTB_MEM,
  43	MEM_GROUP_IGU_MEM,
  44	MEM_GROUP_IGU_MSIX,
  45	MEM_GROUP_CAU_SB,
  46	MEM_GROUP_BMB_RAM,
  47	MEM_GROUP_BMB_MEM,
  48	MEM_GROUP_TM_MEM,
  49	MEM_GROUP_TASK_CFC_MEM,
  50	MEM_GROUPS_NUM
  51};
  52
  53/* Memory groups names */
  54static const char * const s_mem_group_names[] = {
  55	"PXP_MEM",
  56	"DMAE_MEM",
  57	"CM_MEM",
  58	"QM_MEM",
  59	"DORQ_MEM",
  60	"BRB_RAM",
  61	"BRB_MEM",
  62	"PRS_MEM",
  63	"SDM_MEM",
  64	"PBUF",
  65	"IOR",
  66	"RAM",
  67	"BTB_RAM",
  68	"RDIF_CTX",
  69	"TDIF_CTX",
  70	"CFC_MEM",
  71	"CONN_CFC_MEM",
  72	"CAU_PI",
  73	"CAU_MEM",
  74	"CAU_MEM_EXT",
  75	"PXP_ILT",
  76	"MULD_MEM",
  77	"BTB_MEM",
  78	"IGU_MEM",
  79	"IGU_MSIX",
  80	"CAU_SB",
  81	"BMB_RAM",
  82	"BMB_MEM",
  83	"TM_MEM",
  84	"TASK_CFC_MEM",
  85};
  86
  87/* Idle check conditions */
  88
  89static u32 cond5(const u32 *r, const u32 *imm)
  90{
  91	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
  92}
  93
  94static u32 cond7(const u32 *r, const u32 *imm)
  95{
  96	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
  97}
  98
  99static u32 cond6(const u32 *r, const u32 *imm)
 100{
 101	return (r[0] & imm[0]) != imm[1];
 102}
 103
 104static u32 cond9(const u32 *r, const u32 *imm)
 105{
 106	return ((r[0] & imm[0]) >> imm[1]) !=
 107	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
 108}
 109
 110static u32 cond10(const u32 *r, const u32 *imm)
 111{
 112	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
 113}
 114
 115static u32 cond4(const u32 *r, const u32 *imm)
 116{
 117	return (r[0] & ~imm[0]) != imm[1];
 118}
 119
 120static u32 cond0(const u32 *r, const u32 *imm)
 121{
 122	return (r[0] & ~r[1]) != imm[0];
 123}
 124
 125static u32 cond14(const u32 *r, const u32 *imm)
 126{
 127	return (r[0] | imm[0]) != imm[1];
 128}
 129
 130static u32 cond1(const u32 *r, const u32 *imm)
 131{
 132	return r[0] != imm[0];
 133}
 134
 135static u32 cond11(const u32 *r, const u32 *imm)
 136{
 137	return r[0] != r[1] && r[2] == imm[0];
 138}
 139
 140static u32 cond12(const u32 *r, const u32 *imm)
 141{
 142	return r[0] != r[1] && r[2] > imm[0];
 143}
 144
 145static u32 cond3(const u32 *r, const u32 *imm)
 146{
 147	return r[0] != r[1];
 148}
 149
 150static u32 cond13(const u32 *r, const u32 *imm)
 151{
 152	return r[0] & imm[0];
 153}
 154
 155static u32 cond8(const u32 *r, const u32 *imm)
 156{
 157	return r[0] < (r[1] - imm[0]);
 158}
 159
 160static u32 cond2(const u32 *r, const u32 *imm)
 161{
 162	return r[0] > imm[0];
 163}
 164
 165/* Array of Idle Check conditions */
 166static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
 167	cond0,
 168	cond1,
 169	cond2,
 170	cond3,
 171	cond4,
 172	cond5,
 173	cond6,
 174	cond7,
 175	cond8,
 176	cond9,
 177	cond10,
 178	cond11,
 179	cond12,
 180	cond13,
 181	cond14,
 182};
 183
 184#define NUM_PHYS_BLOCKS 84
 185
 186#define NUM_DBG_RESET_REGS 8
 187
 188/******************************* Data Types **********************************/
 189
 190enum hw_types {
 191	HW_TYPE_ASIC,
 192	PLATFORM_RESERVED,
 193	PLATFORM_RESERVED2,
 194	PLATFORM_RESERVED3,
 195	PLATFORM_RESERVED4,
 196	MAX_HW_TYPES
 197};
 198
 199/* CM context types */
 200enum cm_ctx_types {
 201	CM_CTX_CONN_AG,
 202	CM_CTX_CONN_ST,
 203	CM_CTX_TASK_AG,
 204	CM_CTX_TASK_ST,
 205	NUM_CM_CTX_TYPES
 206};
 207
 208/* Debug bus frame modes */
 209enum dbg_bus_frame_modes {
 210	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
 211	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
  212	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dword, 3 HW dwords */
 213	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
 214	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
 215	DBG_BUS_NUM_FRAME_MODES
 216};
 217
 218/* Debug bus SEMI frame modes */
 219enum dbg_bus_semi_frame_modes {
 220	DBG_BUS_SEMI_FRAME_MODE_4FAST = 0,	/* 4 fast dw */
 221	DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
  222	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
 223	DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3,	/* 4 slow dw */
 224	DBG_BUS_SEMI_NUM_FRAME_MODES
 225};
 226
 227/* Debug bus filter types */
 228enum dbg_bus_filter_types {
 229	DBG_BUS_FILTER_TYPE_OFF,	/* Filter always off */
 230	DBG_BUS_FILTER_TYPE_PRE,	/* Filter before trigger only */
 231	DBG_BUS_FILTER_TYPE_POST,	/* Filter after trigger only */
 232	DBG_BUS_FILTER_TYPE_ON	/* Filter always on */
 233};
 234
 235/* Debug bus pre-trigger recording types */
 236enum dbg_bus_pre_trigger_types {
 237	DBG_BUS_PRE_TRIGGER_FROM_ZERO,	/* Record from time 0 */
 238	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,	/* Record some chunks before trigger */
 239	DBG_BUS_PRE_TRIGGER_DROP	/* Drop data before trigger */
 240};
 241
 242/* Debug bus post-trigger recording types */
 243enum dbg_bus_post_trigger_types {
 244	DBG_BUS_POST_TRIGGER_RECORD,	/* Start recording after trigger */
 245	DBG_BUS_POST_TRIGGER_DROP	/* Drop data after trigger */
 246};
 247
 248/* Debug bus other engine mode */
 249enum dbg_bus_other_engine_modes {
 250	DBG_BUS_OTHER_ENGINE_MODE_NONE,
 251	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
 252	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
 253	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
 254	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
 255};
 256
 257/* DBG block Framing mode definitions */
 258struct framing_mode_defs {
 259	u8 id;
 260	u8 blocks_dword_mask;
 261	u8 storms_dword_mask;
 262	u8 semi_framing_mode_id;
 263	u8 full_buf_thr;
 264};
 265
 266/* Chip constant definitions */
 267struct chip_defs {
 268	const char *name;
 269	u8 dwords_per_cycle;
 270	u8 num_framing_modes;
 271	u32 num_ilt_pages;
 272	struct framing_mode_defs *framing_modes;
 273};
 274
 275/* HW type constant definitions */
 276struct hw_type_defs {
 277	const char *name;
 278	u32 delay_factor;
 279	u32 dmae_thresh;
 280	u32 log_thresh;
 281};
 282
 283/* RBC reset definitions */
 284struct rbc_reset_defs {
 285	u32 reset_reg_addr;
 286	u32 reset_val[MAX_CHIP_IDS];
 287};
 288
 289/* Storm constant definitions.
 290 * Addresses are in bytes, sizes are in quad-regs.
 291 */
 292struct storm_defs {
 293	char letter;
 294	enum block_id sem_block_id;
 295	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
 296	bool has_vfc;
 297	u32 sem_fast_mem_addr;
 298	u32 sem_frame_mode_addr;
 299	u32 sem_slow_enable_addr;
 300	u32 sem_slow_mode_addr;
 301	u32 sem_slow_mode1_conf_addr;
 302	u32 sem_sync_dbg_empty_addr;
 303	u32 sem_gpre_vect_addr;
 304	u32 cm_ctx_wr_addr;
 305	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
 306	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
 307};
 308
 309/* Debug Bus Constraint operation constant definitions */
 310struct dbg_bus_constraint_op_defs {
 311	u8 hw_op_val;
 312	bool is_cyclic;
 313};
 314
 315/* Storm Mode definitions */
 316struct storm_mode_defs {
 317	const char *name;
 318	bool is_fast_dbg;
 319	u8 id_in_hw;
 320	u32 src_disable_reg_addr;
 321	u32 src_enable_val;
 322	bool exists[MAX_CHIP_IDS];
 323};
 324
 325struct grc_param_defs {
 326	u32 default_val[MAX_CHIP_IDS];
 327	u32 min;
 328	u32 max;
 329	bool is_preset;
 330	bool is_persistent;
 331	u32 exclude_all_preset_val;
 332	u32 crash_preset_val[MAX_CHIP_IDS];
 333};
 334
 335/* Address is in 128b units. Width is in bits. */
 336struct rss_mem_defs {
 337	const char *mem_name;
 338	const char *type_name;
 339	u32 addr;
 340	u32 entry_width;
 341	u32 num_entries[MAX_CHIP_IDS];
 342};
 343
 344struct vfc_ram_defs {
 345	const char *mem_name;
 346	const char *type_name;
 347	u32 base_row;
 348	u32 num_rows;
 349};
 350
 351struct big_ram_defs {
 352	const char *instance_name;
 353	enum mem_groups mem_group_id;
 354	enum mem_groups ram_mem_group_id;
 355	enum dbg_grc_params grc_param;
 356	u32 addr_reg_addr;
 357	u32 data_reg_addr;
 358	u32 is_256b_reg_addr;
 359	u32 is_256b_bit_offset[MAX_CHIP_IDS];
 360	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
 361};
 362
 363struct phy_defs {
 364	const char *phy_name;
 365
 366	/* PHY base GRC address */
 367	u32 base_addr;
 368
 369	/* Relative address of indirect TBUS address register (bits 0..7) */
 370	u32 tbus_addr_lo_addr;
 371
 372	/* Relative address of indirect TBUS address register (bits 8..10) */
 373	u32 tbus_addr_hi_addr;
 374
 375	/* Relative address of indirect TBUS data register (bits 0..7) */
 376	u32 tbus_data_lo_addr;
 377
 378	/* Relative address of indirect TBUS data register (bits 8..11) */
 379	u32 tbus_data_hi_addr;
 380};
 381
 382/* Split type definitions */
 383struct split_type_defs {
 384	const char *name;
 385};
 386
 387/******************************** Constants **********************************/
 388
 389#define BYTES_IN_DWORD			sizeof(u32)
 390/* In the macros below, size and offset are specified in bits */
 391#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
 392#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
 393#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
 394#define FIELD_DWORD_OFFSET(type, field) \
 395	 ((int)(FIELD_BIT_OFFSET(type, field) / 32))
 396#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
 397#define FIELD_BIT_MASK(type, field) \
 398	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
 399	 FIELD_DWORD_SHIFT(type, field))
 400
 401#define SET_VAR_FIELD(var, type, field, val) \
 402	do { \
 403		var[FIELD_DWORD_OFFSET(type, field)] &=	\
 404		(~FIELD_BIT_MASK(type, field));	\
 405		var[FIELD_DWORD_OFFSET(type, field)] |= \
 406		(val) << FIELD_DWORD_SHIFT(type, field); \
 407	} while (0)
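/* Illustrative note (added, not part of the original source): for a
 * hypothetical field defined with T_F_OFFSET = 36 and T_F_SIZE = 4, the
 * macros above yield FIELD_DWORD_OFFSET = 1, FIELD_DWORD_SHIFT = 4 and
 * FIELD_BIT_MASK = 0xf0, so SET_VAR_FIELD(var, T, F, 5) clears bits 4..7 of
 * var[1] and then writes the value 5 into them.
 */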
 408
 409#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
 410	do { \
 411		for (i = 0; i < (arr_size); i++) \
 412			qed_wr(dev, ptt, addr,	(arr)[i]); \
 413	} while (0)
 414
 415#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
 416#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
 417
 418/* extra lines include a signature line + optional latency events line */
 419#define NUM_EXTRA_DBG_LINES(block) \
 420	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
 421#define NUM_DBG_LINES(block) \
 422	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
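/* Illustrative note (added, not part of the original source): a block that
 * reports 10 debug bus lines has NUM_DBG_LINES = 12 when it has latency
 * events (10 lines + signature line + latency events line) and 11 otherwise.
 */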
 423
 424#define USE_DMAE			true
 425#define PROTECT_WIDE_BUS		true
 426
 427#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
 428#define RAM_LINES_TO_BYTES(lines) \
 429	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
 430
 431#define REG_DUMP_LEN_SHIFT		24
 432#define MEM_DUMP_ENTRY_SIZE_DWORDS \
 433	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
 434
 435#define IDLE_CHK_RULE_SIZE_DWORDS \
 436	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
 437
 438#define IDLE_CHK_RESULT_HDR_DWORDS \
 439	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
 440
 441#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
 442	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
 443
 444#define PAGE_MEM_DESC_SIZE_DWORDS \
 445	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
 446
 447#define IDLE_CHK_MAX_ENTRIES_SIZE	32
 448
 449/* The sizes and offsets below are specified in bits */
 450#define VFC_CAM_CMD_STRUCT_SIZE		64
 451#define VFC_CAM_CMD_ROW_OFFSET		48
 452#define VFC_CAM_CMD_ROW_SIZE		9
 453#define VFC_CAM_ADDR_STRUCT_SIZE	16
 454#define VFC_CAM_ADDR_OP_OFFSET		0
 455#define VFC_CAM_ADDR_OP_SIZE		4
 456#define VFC_CAM_RESP_STRUCT_SIZE	256
 457#define VFC_RAM_ADDR_STRUCT_SIZE	16
 458#define VFC_RAM_ADDR_OP_OFFSET		0
 459#define VFC_RAM_ADDR_OP_SIZE		2
 460#define VFC_RAM_ADDR_ROW_OFFSET		2
 461#define VFC_RAM_ADDR_ROW_SIZE		10
 462#define VFC_RAM_RESP_STRUCT_SIZE	256
 463
 464#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
 465#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
 466#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
 467#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
 468#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
 469#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
 470
 471#define NUM_VFC_RAM_TYPES		4
 472
 473#define VFC_CAM_NUM_ROWS		512
 474
 475#define VFC_OPCODE_CAM_RD		14
 476#define VFC_OPCODE_RAM_RD		0
 477
 478#define NUM_RSS_MEM_TYPES		5
 479
 480#define NUM_BIG_RAM_TYPES		3
 481#define BIG_RAM_NAME_LEN		3
 482
 483#define NUM_PHY_TBUS_ADDRESSES		2048
 484#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
 485
 486#define RESET_REG_UNRESET_OFFSET	4
 487
 488#define STALL_DELAY_MS			500
 489
 490#define STATIC_DEBUG_LINE_DWORDS	9
 491
 492#define NUM_COMMON_GLOBAL_PARAMS	10
 493
 494#define MAX_RECURSION_DEPTH		10
 495
 496#define FW_IMG_KUKU                     0
 497#define FW_IMG_MAIN			1
 498#define FW_IMG_L2B                      2
 499
 500#define REG_FIFO_ELEMENT_DWORDS		2
 501#define REG_FIFO_DEPTH_ELEMENTS		32
 502#define REG_FIFO_DEPTH_DWORDS \
 503	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
 504
 505#define IGU_FIFO_ELEMENT_DWORDS		4
 506#define IGU_FIFO_DEPTH_ELEMENTS		64
 507#define IGU_FIFO_DEPTH_DWORDS \
 508	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
 509
 510#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
 511#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
 512#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
 513	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
 514	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
 515
 516#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
 517	(MCP_REG_SCRATCH + \
 518	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
 519
 520#define MAX_SW_PLTAFORM_STR_SIZE	64
 521
 522#define EMPTY_FW_VERSION_STR		"???_???_???_???"
 523#define EMPTY_FW_IMAGE_STR		"???????????????"
 524
 525/***************************** Constant Arrays *******************************/
 526
 527/* DBG block framing mode definitions, in descending preference order */
 528static struct framing_mode_defs s_framing_mode_defs[4] = {
 529	{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
 530	 DBG_BUS_SEMI_FRAME_MODE_4FAST,
 531	 10},
 532	{DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
 533	 10},
 534	{DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
 535	 DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
 536	{DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
 537	 DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
 538};
 539
 540/* Chip constant definitions array */
 541static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 542	{"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
 543	 s_framing_mode_defs},
 544	{"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
 545	 s_framing_mode_defs}
 546};
 547
 548/* Storm constant definitions array */
 549static struct storm_defs s_storm_defs[] = {
 550	/* Tstorm */
 551	{'T', BLOCK_TSEM,
 552		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 553		true,
 554		TSEM_REG_FAST_MEMORY,
 555		TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
 556		TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
 557		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
 558		TCM_REG_CTX_RBC_ACCS,
 559		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
 560		 TCM_REG_SM_TASK_CTX},
 561		{{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
 562	},
 563
 564	/* Mstorm */
 565	{'M', BLOCK_MSEM,
 566		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 567		false,
 568		MSEM_REG_FAST_MEMORY,
 569		MSEM_REG_DBG_FRAME_MODE,
 570		MSEM_REG_SLOW_DBG_ACTIVE,
 571		MSEM_REG_SLOW_DBG_MODE,
 572		MSEM_REG_DBG_MODE1_CFG,
 573		MSEM_REG_SYNC_DBG_EMPTY,
 574		MSEM_REG_DBG_GPRE_VECT,
 575		MCM_REG_CTX_RBC_ACCS,
 576		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
 577		 MCM_REG_SM_TASK_CTX },
  578		{{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
 579	},
 580
 581	/* Ustorm */
 582	{'U', BLOCK_USEM,
 583		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 584		false,
 585		USEM_REG_FAST_MEMORY,
 586		USEM_REG_DBG_FRAME_MODE,
 587		USEM_REG_SLOW_DBG_ACTIVE,
 588		USEM_REG_SLOW_DBG_MODE,
 589		USEM_REG_DBG_MODE1_CFG,
 590		USEM_REG_SYNC_DBG_EMPTY,
 591		USEM_REG_DBG_GPRE_VECT,
 592		UCM_REG_CTX_RBC_ACCS,
 593		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
 594		 UCM_REG_SM_TASK_CTX},
 595		{{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
 596	},
 597
 598	/* Xstorm */
 599	{'X', BLOCK_XSEM,
 600		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 601		false,
 602		XSEM_REG_FAST_MEMORY,
 603		XSEM_REG_DBG_FRAME_MODE,
 604		XSEM_REG_SLOW_DBG_ACTIVE,
 605		XSEM_REG_SLOW_DBG_MODE,
 606		XSEM_REG_DBG_MODE1_CFG,
 607		XSEM_REG_SYNC_DBG_EMPTY,
 608		XSEM_REG_DBG_GPRE_VECT,
 609		XCM_REG_CTX_RBC_ACCS,
 610		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
 611		{{9, 15, 0, 0}, {9, 15,	0, 0}} /* {bb} {k2} */
 612	},
 613
 614	/* Ystorm */
 615	{'Y', BLOCK_YSEM,
 616		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 617		false,
 618		YSEM_REG_FAST_MEMORY,
 619		YSEM_REG_DBG_FRAME_MODE,
 620		YSEM_REG_SLOW_DBG_ACTIVE,
 621		YSEM_REG_SLOW_DBG_MODE,
 622		YSEM_REG_DBG_MODE1_CFG,
 623		YSEM_REG_SYNC_DBG_EMPTY,
 624		YSEM_REG_DBG_GPRE_VECT,
 625		YCM_REG_CTX_RBC_ACCS,
 626		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
 627		 YCM_REG_SM_TASK_CTX},
 628		{{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
 629	},
 630
 631	/* Pstorm */
 632	{'P', BLOCK_PSEM,
 633		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 634		true,
 635		PSEM_REG_FAST_MEMORY,
 636		PSEM_REG_DBG_FRAME_MODE,
 637		PSEM_REG_SLOW_DBG_ACTIVE,
 638		PSEM_REG_SLOW_DBG_MODE,
 639		PSEM_REG_DBG_MODE1_CFG,
 640		PSEM_REG_SYNC_DBG_EMPTY,
 641		PSEM_REG_DBG_GPRE_VECT,
 642		PCM_REG_CTX_RBC_ACCS,
 643		{0, PCM_REG_SM_CON_CTX, 0, 0},
 644		{{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
 645	},
 646};
 647
 648static struct hw_type_defs s_hw_type_defs[] = {
 649	/* HW_TYPE_ASIC */
 650	{"asic", 1, 256, 32768},
 651	{"reserved", 0, 0, 0},
 652	{"reserved2", 0, 0, 0},
 653	{"reserved3", 0, 0, 0},
 654	{"reserved4", 0, 0, 0}
 655};
 656
 657static struct grc_param_defs s_grc_param_defs[] = {
 658	/* DBG_GRC_PARAM_DUMP_TSTORM */
 659	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
 660
 661	/* DBG_GRC_PARAM_DUMP_MSTORM */
 662	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
 663
 664	/* DBG_GRC_PARAM_DUMP_USTORM */
 665	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
 666
 667	/* DBG_GRC_PARAM_DUMP_XSTORM */
 668	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
 669
 670	/* DBG_GRC_PARAM_DUMP_YSTORM */
 671	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
 672
 673	/* DBG_GRC_PARAM_DUMP_PSTORM */
 674	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
 675
 676	/* DBG_GRC_PARAM_DUMP_REGS */
 677	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 678
 679	/* DBG_GRC_PARAM_DUMP_RAM */
 680	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 681
 682	/* DBG_GRC_PARAM_DUMP_PBUF */
 683	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 684
 685	/* DBG_GRC_PARAM_DUMP_IOR */
 686	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
 687
 688	/* DBG_GRC_PARAM_DUMP_VFC */
 689	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
 690
 691	/* DBG_GRC_PARAM_DUMP_CM_CTX */
 692	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 693
 694	/* DBG_GRC_PARAM_DUMP_ILT */
 695	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 696
 697	/* DBG_GRC_PARAM_DUMP_RSS */
 698	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 699
 700	/* DBG_GRC_PARAM_DUMP_CAU */
 701	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 702
 703	/* DBG_GRC_PARAM_DUMP_QM */
 704	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 705
 706	/* DBG_GRC_PARAM_DUMP_MCP */
 707	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 708
 709	/* DBG_GRC_PARAM_DUMP_DORQ */
 710	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 711
 712	/* DBG_GRC_PARAM_DUMP_CFC */
 713	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 714
 715	/* DBG_GRC_PARAM_DUMP_IGU */
 716	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 717
 718	/* DBG_GRC_PARAM_DUMP_BRB */
 719	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
 720
 721	/* DBG_GRC_PARAM_DUMP_BTB */
 722	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
 723
 724	/* DBG_GRC_PARAM_DUMP_BMB */
 725	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 726
 727	/* DBG_GRC_PARAM_RESERVED1 */
 728	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 729
 730	/* DBG_GRC_PARAM_DUMP_MULD */
 731	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 732
 733	/* DBG_GRC_PARAM_DUMP_PRS */
 734	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 735
 736	/* DBG_GRC_PARAM_DUMP_DMAE */
 737	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 738
 739	/* DBG_GRC_PARAM_DUMP_TM */
 740	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 741
 742	/* DBG_GRC_PARAM_DUMP_SDM */
 743	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 744
 745	/* DBG_GRC_PARAM_DUMP_DIF */
 746	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 747
 748	/* DBG_GRC_PARAM_DUMP_STATIC */
 749	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 750
 751	/* DBG_GRC_PARAM_UNSTALL */
 752	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 753
 754	/* DBG_GRC_PARAM_RESERVED2 */
 755	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 756
 757	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
 758	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
 759
 760	/* DBG_GRC_PARAM_EXCLUDE_ALL */
 761	{{0, 0}, 0, 1, true, false, 0, {0, 0}},
 762
 763	/* DBG_GRC_PARAM_CRASH */
 764	{{0, 0}, 0, 1, true, false, 0, {0, 0}},
 765
 766	/* DBG_GRC_PARAM_PARITY_SAFE */
 767	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 768
 769	/* DBG_GRC_PARAM_DUMP_CM */
 770	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
 771
 772	/* DBG_GRC_PARAM_DUMP_PHY */
 773	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 774
 775	/* DBG_GRC_PARAM_NO_MCP */
 776	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 777
 778	/* DBG_GRC_PARAM_NO_FW_VER */
 779	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 780
 781	/* DBG_GRC_PARAM_RESERVED3 */
 782	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
 783
 784	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
 785	{{0, 1}, 0, 1, false, false, 0, {0, 1}},
 786
 787	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
 788	{{1, 1}, 0, 1, false, false, 0, {0, 0}},
 789
 790	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
 791	{{1, 1}, 0, 1, false, false, 0, {0, 0}},
 792
 793	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
 794	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
 795};
 796
 797static struct rss_mem_defs s_rss_mem_defs[] = {
 798	{"rss_mem_cid", "rss_cid", 0, 32,
 799	 {256, 320}},
 800
 801	{"rss_mem_key_msb", "rss_key", 1024, 256,
 802	 {128, 208}},
 803
 804	{"rss_mem_key_lsb", "rss_key", 2048, 64,
 805	 {128, 208}},
 806
 807	{"rss_mem_info", "rss_info", 3072, 16,
 808	 {128, 208}},
 809
 810	{"rss_mem_ind", "rss_ind", 4096, 16,
 811	 {16384, 26624}}
 812};
 813
 814static struct vfc_ram_defs s_vfc_ram_defs[] = {
 815	{"vfc_ram_tt1", "vfc_ram", 0, 512},
 816	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
 817	{"vfc_ram_stt2", "vfc_ram", 640, 32},
 818	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
 819};
 820
 821static struct big_ram_defs s_big_ram_defs[] = {
 822	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
 823	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
 824	 MISC_REG_BLOCK_256B_EN, {0, 0},
 825	 {153600, 180224}},
 826
 827	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
 828	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
 829	 MISC_REG_BLOCK_256B_EN, {0, 1},
 830	 {92160, 117760}},
 831
 832	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
 833	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
 834	 MISCS_REG_BLOCK_256B_EN, {0, 0},
 835	 {36864, 36864}}
 836};
 837
 838static struct rbc_reset_defs s_rbc_reset_defs[] = {
 839	{MISCS_REG_RESET_PL_HV,
 840	 {0x0, 0x400}},
 841	{MISC_REG_RESET_PL_PDA_VMAIN_1,
 842	 {0x4404040, 0x4404040}},
 843	{MISC_REG_RESET_PL_PDA_VMAIN_2,
 844	 {0x7, 0x7c00007}},
 845	{MISC_REG_RESET_PL_PDA_VAUX,
 846	 {0x2, 0x2}},
 847};
 848
 849static struct phy_defs s_phy_defs[] = {
 850	{"nw_phy", NWS_REG_NWS_CMU_K2,
 851	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
 852	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
 853	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
 854	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
 855	{"sgmii_phy", MS_REG_MS_CMU_K2,
 856	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
 857	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
 858	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
 859	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
 860	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
 861	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
 862	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
 863	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
 864	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
 865	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
 866	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
 867	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
 868	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
 869	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
 870};
 871
 872static struct split_type_defs s_split_type_defs[] = {
 873	/* SPLIT_TYPE_NONE */
 874	{"eng"},
 875
 876	/* SPLIT_TYPE_PORT */
 877	{"port"},
 878
 879	/* SPLIT_TYPE_PF */
 880	{"pf"},
 881
 882	/* SPLIT_TYPE_PORT_PF */
 883	{"port"},
 884
 885	/* SPLIT_TYPE_VF */
 886	{"vf"}
 887};
 888
 889/******************************** Variables **********************************/
 890
 891/* The version of the calling app */
 892static u32 s_app_ver;
 893
 894/**************************** Private Functions ******************************/
 895
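/* Note (added; an assumption, not stated in the original source): the helper
 * below appears to be an intentionally empty placeholder for compile-time
 * (static) assertions in this version of the driver.
 */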
 896static void qed_static_asserts(void)
 897{
 898}
 899
 900/* Reads and returns a single dword from the specified unaligned buffer */
 901static u32 qed_read_unaligned_dword(u8 *buf)
 902{
 903	u32 dword;
 904
 905	memcpy((u8 *)&dword, buf, sizeof(dword));
 906	return dword;
 907}
 908
 909/* Sets the value of the specified GRC param */
 910static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
 911			      enum dbg_grc_params grc_param, u32 val)
 912{
 913	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 914
 915	dev_data->grc.param_val[grc_param] = val;
 916}
 917
 918/* Returns the value of the specified GRC param */
 919static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
 920			     enum dbg_grc_params grc_param)
 921{
 922	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 923
 924	return dev_data->grc.param_val[grc_param];
 925}
 926
 927/* Initializes the GRC parameters */
 928static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
 929{
 930	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 931
 932	if (!dev_data->grc.params_initialized) {
 933		qed_dbg_grc_set_params_default(p_hwfn);
 934		dev_data->grc.params_initialized = 1;
 935	}
 936}
 937
 938/* Sets pointer and size for the specified binary buffer type */
 939static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
 940				enum bin_dbg_buffer_type buf_type,
 941				const u32 *ptr, u32 size)
 942{
 943	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
 944
 945	buf->ptr = (void *)ptr;
 946	buf->size = size;
 947}
 948
 949/* Initializes debug data for the specified device */
 950static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
 951{
 952	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 953	u8 num_pfs = 0, max_pfs_per_port = 0;
 954
 955	if (dev_data->initialized)
 956		return DBG_STATUS_OK;
 957
 958	if (!s_app_ver)
 959		return DBG_STATUS_APP_VERSION_NOT_SET;
 960
 961	/* Set chip */
 962	if (QED_IS_K2(p_hwfn->cdev)) {
 963		dev_data->chip_id = CHIP_K2;
 964		dev_data->mode_enable[MODE_K2] = 1;
 965		dev_data->num_vfs = MAX_NUM_VFS_K2;
 966		num_pfs = MAX_NUM_PFS_K2;
 967		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
 968	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
 969		dev_data->chip_id = CHIP_BB;
 970		dev_data->mode_enable[MODE_BB] = 1;
 971		dev_data->num_vfs = MAX_NUM_VFS_BB;
 972		num_pfs = MAX_NUM_PFS_BB;
 973		max_pfs_per_port = MAX_NUM_PFS_BB;
 974	} else {
 975		return DBG_STATUS_UNKNOWN_CHIP;
 976	}
 977
 978	/* Set HW type */
 979	dev_data->hw_type = HW_TYPE_ASIC;
 980	dev_data->mode_enable[MODE_ASIC] = 1;
 981
 982	/* Set port mode */
 983	switch (p_hwfn->cdev->num_ports_in_engine) {
 984	case 1:
 985		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
 986		break;
 987	case 2:
 988		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
 989		break;
 990	case 4:
 991		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
 992		break;
 993	}
 994
 995	/* Set 100G mode */
 996	if (QED_IS_CMT(p_hwfn->cdev))
 997		dev_data->mode_enable[MODE_100G] = 1;
 998
 999	/* Set number of ports */
1000	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1001	    dev_data->mode_enable[MODE_100G])
1002		dev_data->num_ports = 1;
1003	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1004		dev_data->num_ports = 2;
1005	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1006		dev_data->num_ports = 4;
1007
1008	/* Set number of PFs per port */
1009	dev_data->num_pfs_per_port = min_t(u32,
1010					   num_pfs / dev_data->num_ports,
1011					   max_pfs_per_port);
1012
1013	/* Initializes the GRC parameters */
1014	qed_dbg_grc_init_params(p_hwfn);
1015
1016	dev_data->use_dmae = true;
1017	dev_data->initialized = 1;
1018
1019	return DBG_STATUS_OK;
1020}
1021
1022static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
1023					     enum block_id block_id)
1024{
1025	const struct dbg_block *dbg_block;
1026
1027	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
1028	return dbg_block + block_id;
1029}
1030
1031static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
1032							       *p_hwfn,
1033							       enum block_id
1034							       block_id)
1035{
1036	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1037
1038	return (const struct dbg_block_chip *)
1039	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
1040	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
1041}
1042
1043static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
1044							 *p_hwfn,
1045							 u8 reset_reg_id)
1046{
1047	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1048
1049	return (const struct dbg_reset_reg *)
1050	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
1051	    reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
1052}
1053
1054/* Reads the FW info structure for the specified Storm from the chip,
1055 * and writes it to the specified fw_info pointer.
1056 */
1057static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1058				   struct qed_ptt *p_ptt,
1059				   u8 storm_id, struct fw_info *fw_info)
1060{
1061	struct storm_defs *storm = &s_storm_defs[storm_id];
1062	struct fw_info_location fw_info_location;
1063	u32 addr, i, size, *dest;
1064
1065	memset(&fw_info_location, 0, sizeof(fw_info_location));
1066	memset(fw_info, 0, sizeof(*fw_info));
1067
 1068	/* First, read the address that points to the fw_info location.
1069	 * The address is located in the last line of the Storm RAM.
1070	 */
1071	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1072	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1073	    sizeof(fw_info_location);
1074
1075	dest = (u32 *)&fw_info_location;
1076	size = BYTES_TO_DWORDS(sizeof(fw_info_location));
1077
1078	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
1079		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1080
1081	/* Read FW version info from Storm RAM */
1082	size = le32_to_cpu(fw_info_location.size);
1083	if (!size || size > sizeof(*fw_info))
1084		return;
1085
1086	addr = le32_to_cpu(fw_info_location.grc_addr);
1087	dest = (u32 *)fw_info;
1088	size = BYTES_TO_DWORDS(size);
1089
1090	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
1091		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1092}
1093
1094/* Dumps the specified string to the specified buffer.
1095 * Returns the dumped size in bytes.
1096 */
1097static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1098{
1099	if (dump)
1100		strcpy(dump_buf, str);
1101
1102	return (u32)strlen(str) + 1;
1103}
1104
1105/* Dumps zeros to align the specified buffer to dwords.
1106 * Returns the dumped size in bytes.
1107 */
1108static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1109{
1110	u8 offset_in_dword, align_size;
1111
1112	offset_in_dword = (u8)(byte_offset & 0x3);
1113	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1114
1115	if (dump && align_size)
1116		memset(dump_buf, 0, align_size);
1117
1118	return align_size;
1119}
1120
1121/* Writes the specified string param to the specified buffer.
1122 * Returns the dumped size in dwords.
1123 */
1124static u32 qed_dump_str_param(u32 *dump_buf,
1125			      bool dump,
1126			      const char *param_name, const char *param_val)
1127{
1128	char *char_buf = (char *)dump_buf;
1129	u32 offset = 0;
1130
1131	/* Dump param name */
1132	offset += qed_dump_str(char_buf + offset, dump, param_name);
1133
1134	/* Indicate a string param value */
1135	if (dump)
1136		*(char_buf + offset) = 1;
1137	offset++;
1138
1139	/* Dump param value */
1140	offset += qed_dump_str(char_buf + offset, dump, param_val);
1141
1142	/* Align buffer to next dword */
1143	offset += qed_dump_align(char_buf + offset, dump, offset);
1144
1145	return BYTES_TO_DWORDS(offset);
1146}
1147
1148/* Writes the specified numeric param to the specified buffer.
1149 * Returns the dumped size in dwords.
1150 */
1151static u32 qed_dump_num_param(u32 *dump_buf,
1152			      bool dump, const char *param_name, u32 param_val)
1153{
1154	char *char_buf = (char *)dump_buf;
1155	u32 offset = 0;
1156
1157	/* Dump param name */
1158	offset += qed_dump_str(char_buf + offset, dump, param_name);
1159
1160	/* Indicate a numeric param value */
1161	if (dump)
1162		*(char_buf + offset) = 0;
1163	offset++;
1164
1165	/* Align buffer to next dword */
1166	offset += qed_dump_align(char_buf + offset, dump, offset);
1167
1168	/* Dump param value (and change offset from bytes to dwords) */
1169	offset = BYTES_TO_DWORDS(offset);
1170	if (dump)
1171		*(dump_buf + offset) = param_val;
1172	offset++;
1173
1174	return offset;
1175}
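/* Summary (added, derived from the two helpers above rather than a separate
 * spec): each dumped param starts with its NUL-terminated name followed by a
 * one-byte type marker (1 = string, 0 = numeric). A string param then carries
 * its NUL-terminated value and is zero-padded to the next dword boundary; a
 * numeric param is zero-padded to the next dword boundary and then carries
 * its value in a single dword.
 */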
1176
1177/* Reads the FW version and writes it as a param to the specified buffer.
1178 * Returns the dumped size in dwords.
1179 */
1180static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1181				 struct qed_ptt *p_ptt,
1182				 u32 *dump_buf, bool dump)
1183{
1184	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1185	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1186	struct fw_info fw_info = { {0}, {0} };
1187	u32 offset = 0;
1188
1189	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1190		/* Read FW info from chip */
1191		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1192
1193		/* Create FW version/image strings */
1194		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1195			     "%d_%d_%d_%d", fw_info.ver.num.major,
1196			     fw_info.ver.num.minor, fw_info.ver.num.rev,
1197			     fw_info.ver.num.eng) < 0)
1198			DP_NOTICE(p_hwfn,
1199				  "Unexpected debug error: invalid FW version string\n");
1200		switch (fw_info.ver.image_id) {
1201		case FW_IMG_KUKU:
1202			strcpy(fw_img_str, "kuku");
1203			break;
1204		case FW_IMG_MAIN:
1205			strcpy(fw_img_str, "main");
1206			break;
1207		case FW_IMG_L2B:
1208			strcpy(fw_img_str, "l2b");
1209			break;
1210		default:
1211			strcpy(fw_img_str, "unknown");
1212			break;
1213		}
1214	}
1215
1216	/* Dump FW version, image and timestamp */
1217	offset += qed_dump_str_param(dump_buf + offset,
1218				     dump, "fw-version", fw_ver_str);
1219	offset += qed_dump_str_param(dump_buf + offset,
1220				     dump, "fw-image", fw_img_str);
1221	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
1222				     le32_to_cpu(fw_info.ver.timestamp));
1223
1224	return offset;
1225}
1226
1227/* Reads the MFW version and writes it as a param to the specified buffer.
1228 * Returns the dumped size in dwords.
1229 */
1230static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1231				  struct qed_ptt *p_ptt,
1232				  u32 *dump_buf, bool dump)
1233{
1234	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1235
1236	if (dump &&
1237	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1238		u32 global_section_offsize, global_section_addr, mfw_ver;
1239		u32 public_data_addr, global_section_offsize_addr;
1240
1241		/* Find MCP public data GRC address. Needs to be ORed with
1242		 * MCP_REG_SCRATCH due to a HW bug.
1243		 */
1244		public_data_addr = qed_rd(p_hwfn,
1245					  p_ptt,
1246					  MISC_REG_SHARED_MEM_ADDR) |
1247				   MCP_REG_SCRATCH;
1248
1249		/* Find MCP public global section offset */
1250		global_section_offsize_addr = public_data_addr +
1251					      offsetof(struct mcp_public_data,
1252						       sections) +
1253					      sizeof(offsize_t) * PUBLIC_GLOBAL;
1254		global_section_offsize = qed_rd(p_hwfn, p_ptt,
1255						global_section_offsize_addr);
1256		global_section_addr =
1257			MCP_REG_SCRATCH +
1258			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
1259
1260		/* Read MFW version from MCP public global section */
1261		mfw_ver = qed_rd(p_hwfn, p_ptt,
1262				 global_section_addr +
1263				 offsetof(struct public_global, mfw_ver));
1264
1265		/* Dump MFW version param */
1266		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
1267			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
1268			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
1269			DP_NOTICE(p_hwfn,
1270				  "Unexpected debug error: invalid MFW version string\n");
1271	}
1272
1273	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1274}
1275
1276/* Reads the chip revision from the chip and writes it as a param to the
1277 * specified buffer. Returns the dumped size in dwords.
1278 */
1279static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1280					struct qed_ptt *p_ptt,
1281					u32 *dump_buf, bool dump)
1282{
1283	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1284	char param_str[3] = "??";
1285
1286	if (dev_data->hw_type == HW_TYPE_ASIC) {
1287		u32 chip_rev, chip_metal;
1288
1289		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1290		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1291
1292		param_str[0] = 'a' + (u8)chip_rev;
1293		param_str[1] = '0' + (u8)chip_metal;
1294	}
1295
1296	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1297}
1298
1299/* Writes a section header to the specified buffer.
1300 * Returns the dumped size in dwords.
1301 */
1302static u32 qed_dump_section_hdr(u32 *dump_buf,
1303				bool dump, const char *name, u32 num_params)
1304{
1305	return qed_dump_num_param(dump_buf, dump, name, num_params);
1306}
1307
1308/* Writes the common global params to the specified buffer.
1309 * Returns the dumped size in dwords.
1310 */
1311static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1312					 struct qed_ptt *p_ptt,
1313					 u32 *dump_buf,
1314					 bool dump,
1315					 u8 num_specific_global_params)
1316{
1317	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1318	u32 offset = 0;
1319	u8 num_params;
1320
1321	/* Dump global params section header */
1322	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1323		(dev_data->chip_id == CHIP_BB ? 1 : 0);
1324	offset += qed_dump_section_hdr(dump_buf + offset,
1325				       dump, "global_params", num_params);
1326
1327	/* Store params */
1328	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1329	offset += qed_dump_mfw_ver_param(p_hwfn,
1330					 p_ptt, dump_buf + offset, dump);
1331	offset += qed_dump_chip_revision_param(p_hwfn,
1332					       p_ptt, dump_buf + offset, dump);
1333	offset += qed_dump_num_param(dump_buf + offset,
1334				     dump, "tools-version", TOOLS_VERSION);
1335	offset += qed_dump_str_param(dump_buf + offset,
1336				     dump,
1337				     "chip",
1338				     s_chip_defs[dev_data->chip_id].name);
1339	offset += qed_dump_str_param(dump_buf + offset,
1340				     dump,
1341				     "platform",
1342				     s_hw_type_defs[dev_data->hw_type].name);
1343	offset += qed_dump_num_param(dump_buf + offset,
1344				     dump, "pci-func", p_hwfn->abs_pf_id);
1345	offset += qed_dump_num_param(dump_buf + offset,
1346				     dump, "epoch", qed_get_epoch_time());
1347	if (dev_data->chip_id == CHIP_BB)
1348		offset += qed_dump_num_param(dump_buf + offset,
1349					     dump, "path", QED_PATH_ID(p_hwfn));
1350
1351	return offset;
1352}
1353
1354/* Writes the "last" section (including CRC) to the specified buffer at the
1355 * given offset. Returns the dumped size in dwords.
1356 */
1357static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1358{
1359	u32 start_offset = offset;
1360
1361	/* Dump CRC section header */
1362	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1363
1364	/* Calculate CRC32 and add it to the dword after the "last" section */
1365	if (dump)
1366		*(dump_buf + offset) = ~crc32(0xffffffff,
1367					      (u8 *)dump_buf,
1368					      DWORDS_TO_BYTES(offset));
1369
1370	offset++;
1371
1372	return offset - start_offset;
1373}
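/* Note (added, derived from the function above): the "last" section is a
 * section header with zero params followed by one dword holding the bitwise
 * NOT of the CRC32 calculated over everything dumped before that dword.
 */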
1374
 1375/* Updates the blocks' reset state */
1376static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1377					  struct qed_ptt *p_ptt)
1378{
1379	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1380	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1381	u8 rst_reg_id;
1382	u32 blk_id;
1383
1384	/* Read reset registers */
1385	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
1386		const struct dbg_reset_reg *rst_reg;
1387		bool rst_reg_removed;
1388		u32 rst_reg_addr;
1389
1390		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
1391		rst_reg_removed = GET_FIELD(rst_reg->data,
1392					    DBG_RESET_REG_IS_REMOVED);
1393		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
1394							 DBG_RESET_REG_ADDR));
1395
1396		if (!rst_reg_removed)
1397			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
1398						     rst_reg_addr);
1399	}
1400
1401	/* Check if blocks are in reset */
1402	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
1403		const struct dbg_block_chip *blk;
1404		bool has_rst_reg;
1405		bool is_removed;
1406
1407		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
1408		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1409		has_rst_reg = GET_FIELD(blk->flags,
1410					DBG_BLOCK_CHIP_HAS_RESET_REG);
1411
1412		if (!is_removed && has_rst_reg)
1413			dev_data->block_in_reset[blk_id] =
1414			    !(reg_val[blk->reset_reg_id] &
1415			      BIT(blk->reset_reg_bit_offset));
1416	}
1417}
1418
 1419/* Recursive helper for qed_is_mode_match() */
1420static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
1421				  u16 *modes_buf_offset, u8 rec_depth)
1422{
1423	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1424	u8 *dbg_array;
1425	bool arg1, arg2;
1426	u8 tree_val;
1427
1428	if (rec_depth > MAX_RECURSION_DEPTH) {
1429		DP_NOTICE(p_hwfn,
1430			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
1431		return false;
1432	}
1433
1434	/* Get next element from modes tree buffer */
1435	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1436	tree_val = dbg_array[(*modes_buf_offset)++];
1437
1438	switch (tree_val) {
1439	case INIT_MODE_OP_NOT:
1440		return !qed_is_mode_match_rec(p_hwfn,
1441					      modes_buf_offset, rec_depth + 1);
1442	case INIT_MODE_OP_OR:
1443	case INIT_MODE_OP_AND:
1444		arg1 = qed_is_mode_match_rec(p_hwfn,
1445					     modes_buf_offset, rec_depth + 1);
1446		arg2 = qed_is_mode_match_rec(p_hwfn,
1447					     modes_buf_offset, rec_depth + 1);
1448		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1449							arg2) : (arg1 && arg2);
1450	default:
1451		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1452	}
1453}
1454
1455/* Returns true if the mode (specified using modes_buf_offset) is enabled */
1456static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1457{
1458	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
1459}
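/* Note (added, derived from the recursion above; the example sequence is
 * hypothetical): the modes tree buffer is evaluated in prefix notation. Each
 * byte is either an operator (INIT_MODE_OP_NOT/OR/AND) whose operands follow
 * recursively, or a mode index offset by MAX_INIT_MODE_OPS that is looked up
 * in dev_data->mode_enable[]. For example, {INIT_MODE_OP_AND, MODE_K2 +
 * MAX_INIT_MODE_OPS, MODE_100G + MAX_INIT_MODE_OPS} matches only when both
 * modes are enabled.
 */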
1460
1461/* Enable / disable the Debug block */
1462static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1463				     struct qed_ptt *p_ptt, bool enable)
1464{
1465	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1466}
1467
1468/* Resets the Debug block */
1469static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1470				    struct qed_ptt *p_ptt)
1471{
1472	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1473	const struct dbg_reset_reg *reset_reg;
1474	const struct dbg_block_chip *block;
1475
1476	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1477	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1478	reset_reg_addr =
1479	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1480
1481	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
1482	new_reset_reg_val =
1483	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1484
1485	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1486	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1487}
1488
1489/* Enable / disable Debug Bus clients according to the specified mask
1490 * (1 = enable, 0 = disable).
1491 */
1492static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1493				   struct qed_ptt *p_ptt, u32 client_mask)
1494{
1495	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
1496}
1497
1498static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
1499				    struct qed_ptt *p_ptt,
1500				    enum block_id block_id,
1501				    u8 line_id,
1502				    u8 enable_mask,
1503				    u8 right_shift,
1504				    u8 force_valid_mask, u8 force_frame_mask)
1505{
1506	const struct dbg_block_chip *block =
1507		qed_get_dbg_block_per_chip(p_hwfn, block_id);
1508
1509	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
1510	       line_id);
1511	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
1512	       enable_mask);
1513	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
1514	       right_shift);
1515	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
1516	       force_valid_mask);
1517	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1518	       force_frame_mask);
1519}
1520
1521/* Disable debug bus in all blocks */
1522static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
1523				   struct qed_ptt *p_ptt)
1524{
1525	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1526	u32 block_id;
1527
1528	/* Disable all blocks */
1529	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
1530		const struct dbg_block_chip *block_per_chip =
1531		    qed_get_dbg_block_per_chip(p_hwfn,
1532					       (enum block_id)block_id);
1533
1534		if (GET_FIELD(block_per_chip->flags,
1535			      DBG_BLOCK_CHIP_IS_REMOVED) ||
1536		    dev_data->block_in_reset[block_id])
1537			continue;
1538
1539		/* Disable debug bus */
1540		if (GET_FIELD(block_per_chip->flags,
1541			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
1542			u32 dbg_en_addr =
1543				block_per_chip->dbg_dword_enable_reg_addr;
1544			u16 modes_buf_offset =
1545			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1546				      DBG_MODE_HDR_MODES_BUF_OFFSET);
1547			bool eval_mode =
1548			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1549				      DBG_MODE_HDR_EVAL_MODE) > 0;
1550
1551			if (!eval_mode ||
1552			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1553				qed_wr(p_hwfn, p_ptt,
1554				       DWORDS_TO_BYTES(dbg_en_addr),
1555				       0);
1556		}
1557	}
1558}
1559
1560/* Returns true if the specified entity (indicated by GRC param) should be
1561 * included in the dump, false otherwise.
1562 */
1563static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1564				enum dbg_grc_params grc_param)
1565{
1566	return qed_grc_get_param(p_hwfn, grc_param) > 0;
1567}
1568
1569/* Returns the storm_id that matches the specified Storm letter,
 1570 * or MAX_DBG_STORMS if the letter is invalid.
1571 */
1572static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1573{
1574	u8 storm_id;
1575
1576	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1577		if (s_storm_defs[storm_id].letter == storm_letter)
1578			return (enum dbg_storms)storm_id;
1579
1580	return MAX_DBG_STORMS;
1581}
1582
 1583/* Returns true if the specified Storm should be included in the dump, false
1584 * otherwise.
1585 */
1586static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1587				      enum dbg_storms storm)
1588{
1589	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1590}
1591
1592/* Returns true if the specified memory should be included in the dump, false
1593 * otherwise.
1594 */
1595static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1596				    enum block_id block_id, u8 mem_group_id)
1597{
1598	const struct dbg_block *block;
1599	u8 i;
1600
1601	block = get_dbg_block(p_hwfn, block_id);
1602
1603	/* If the block is associated with a Storm, check Storm match */
1604	if (block->associated_storm_letter) {
1605		enum dbg_storms associated_storm_id =
1606		    qed_get_id_from_letter(block->associated_storm_letter);
1607
1608		if (associated_storm_id == MAX_DBG_STORMS ||
1609		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
1610			return false;
1611	}
1612
1613	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
1614		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
1615
1616		if (mem_group_id == big_ram->mem_group_id ||
1617		    mem_group_id == big_ram->ram_mem_group_id)
1618			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
1619	}
1620
1621	switch (mem_group_id) {
1622	case MEM_GROUP_PXP_ILT:
1623	case MEM_GROUP_PXP_MEM:
1624		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1625	case MEM_GROUP_RAM:
1626		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1627	case MEM_GROUP_PBUF:
1628		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1629	case MEM_GROUP_CAU_MEM:
1630	case MEM_GROUP_CAU_SB:
1631	case MEM_GROUP_CAU_PI:
1632		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
1633	case MEM_GROUP_CAU_MEM_EXT:
1634		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
1635	case MEM_GROUP_QM_MEM:
1636		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
1637	case MEM_GROUP_CFC_MEM:
1638	case MEM_GROUP_CONN_CFC_MEM:
1639	case MEM_GROUP_TASK_CFC_MEM:
1640		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
1641		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
1642	case MEM_GROUP_DORQ_MEM:
1643		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
1644	case MEM_GROUP_IGU_MEM:
1645	case MEM_GROUP_IGU_MSIX:
1646		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
1647	case MEM_GROUP_MULD_MEM:
1648		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
1649	case MEM_GROUP_PRS_MEM:
1650		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
1651	case MEM_GROUP_DMAE_MEM:
1652		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
1653	case MEM_GROUP_TM_MEM:
1654		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
1655	case MEM_GROUP_SDM_MEM:
1656		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
1657	case MEM_GROUP_TDIF_CTX:
1658	case MEM_GROUP_RDIF_CTX:
1659		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
1660	case MEM_GROUP_CM_MEM:
1661		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
1662	case MEM_GROUP_IOR:
1663		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
1664	default:
1665		return true;
1666	}
1667}
1668
 1669/* Stalls or unstalls all Storms, according to the 'stall' argument */
1670static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1671				 struct qed_ptt *p_ptt, bool stall)
1672{
1673	u32 reg_addr;
1674	u8 storm_id;
1675
1676	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1677		if (!qed_grc_is_storm_included(p_hwfn,
1678					       (enum dbg_storms)storm_id))
1679			continue;
1680
1681		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1682		    SEM_FAST_REG_STALL_0;
1683		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
1684	}
1685
1686	msleep(STALL_DELAY_MS);
1687}
1688
1689/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1690 * taken out of reset.
1691 */
1692static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1693				   struct qed_ptt *p_ptt, bool rbc_only)
1694{
1695	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1696	u8 chip_id = dev_data->chip_id;
1697	u32 i;
1698
1699	/* Take RBCs out of reset */
1700	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
1701		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
1702			qed_wr(p_hwfn,
1703			       p_ptt,
1704			       s_rbc_reset_defs[i].reset_reg_addr +
1705			       RESET_REG_UNRESET_OFFSET,
1706			       s_rbc_reset_defs[i].reset_val[chip_id]);
1707
1708	if (!rbc_only) {
1709		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1710		u8 reset_reg_id;
1711		u32 block_id;
1712
1713		/* Fill reset regs values */
1714		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1715			bool is_removed, has_reset_reg, unreset_before_dump;
1716			const struct dbg_block_chip *block;
1717
1718			block = qed_get_dbg_block_per_chip(p_hwfn,
1719							   (enum block_id)
1720							   block_id);
1721			is_removed =
1722			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1723			has_reset_reg =
1724			    GET_FIELD(block->flags,
1725				      DBG_BLOCK_CHIP_HAS_RESET_REG);
1726			unreset_before_dump =
1727			    GET_FIELD(block->flags,
1728				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
1729
1730			if (!is_removed && has_reset_reg && unreset_before_dump)
1731				reg_val[block->reset_reg_id] |=
1732				    BIT(block->reset_reg_bit_offset);
1733		}
1734
1735		/* Write reset registers */
1736		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
1737		     reset_reg_id++) {
1738			const struct dbg_reset_reg *reset_reg;
1739			u32 reset_reg_addr;
1740
1741			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
1742
1743			if (GET_FIELD
1744			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
1745				continue;
1746
1747			if (reg_val[reset_reg_id]) {
1748				reset_reg_addr =
1749				    GET_FIELD(reset_reg->data,
1750					      DBG_RESET_REG_ADDR);
1751				qed_wr(p_hwfn,
1752				       p_ptt,
1753				       DWORDS_TO_BYTES(reset_reg_addr) +
1754				       RESET_REG_UNRESET_OFFSET,
1755				       reg_val[reset_reg_id]);
1756			}
1757		}
1758	}
1759}
1760
1761/* Returns the attention block data of the specified block */
1762static const struct dbg_attn_block_type_data *
1763qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1764			enum block_id block_id, enum dbg_attn_type attn_type)
1765{
1766	const struct dbg_attn_block *base_attn_block_arr =
1767	    (const struct dbg_attn_block *)
1768	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1769
1770	return &base_attn_block_arr[block_id].per_type_data[attn_type];
1771}
1772
1773/* Returns the attention registers of the specified block */
1774static const struct dbg_attn_reg *
1775qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1776			enum block_id block_id, enum dbg_attn_type attn_type,
1777			u8 *num_attn_regs)
1778{
1779	const struct dbg_attn_block_type_data *block_type_data =
1780	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1781
1782	*num_attn_regs = block_type_data->num_regs;
1783
1784	return (const struct dbg_attn_reg *)
1785		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1786		block_type_data->regs_offset;
1787}
1788
1789/* For each block, clear the status of all parities */
1790static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
1791				   struct qed_ptt *p_ptt)
1792{
1793	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1794	const struct dbg_attn_reg *attn_reg_arr;
1795	u32 block_id, sts_clr_address;
1796	u8 reg_idx, num_attn_regs;
1797
1798	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1799		if (dev_data->block_in_reset[block_id])
1800			continue;
1801
1802		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
1803						       (enum block_id)block_id,
1804						       ATTN_TYPE_PARITY,
1805						       &num_attn_regs);
1806
1807		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
1808			const struct dbg_attn_reg *reg_data =
1809				&attn_reg_arr[reg_idx];
1810			u16 modes_buf_offset;
1811			bool eval_mode;
1812
1813			/* Check mode */
1814			eval_mode = GET_FIELD(reg_data->mode.data,
1815					      DBG_MODE_HDR_EVAL_MODE) > 0;
1816			modes_buf_offset =
1817				GET_FIELD(reg_data->mode.data,
1818					  DBG_MODE_HDR_MODES_BUF_OFFSET);
1819
1820			sts_clr_address = reg_data->sts_clr_address;
1821			/* If Mode match: clear parity status */
1822			if (!eval_mode ||
1823			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1824				qed_rd(p_hwfn, p_ptt,
1825				       DWORDS_TO_BYTES(sts_clr_address));
1826		}
1827	}
1828}
1829
1830/* Finds the meta data image in NVRAM */
1831static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
1832					    struct qed_ptt *p_ptt,
1833					    u32 image_type,
1834					    u32 *nvram_offset_bytes,
1835					    u32 *nvram_size_bytes,
1836					    bool b_can_sleep)
1837{
1838	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
1839	struct mcp_file_att file_att;
1840	int nvm_result;
1841
1842	/* Call NVRAM get file command */
1843	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
1844					p_ptt,
1845					DRV_MSG_CODE_NVM_GET_FILE_ATT,
1846					image_type,
1847					&ret_mcp_resp,
1848					&ret_mcp_param,
1849					&ret_txn_size,
1850					(u32 *)&file_att,
1851					b_can_sleep);
1852
1853	/* Check response */
1854	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
1855	    FW_MSG_CODE_NVM_OK)
1856		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
1857
1858	/* Update return values */
1859	*nvram_offset_bytes = file_att.nvm_start_addr;
1860	*nvram_size_bytes = file_att.len;
1861
1862	DP_VERBOSE(p_hwfn,
1863		   QED_MSG_DEBUG,
1864		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
1865		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
1866
1867	/* Check alignment */
1868	if (*nvram_size_bytes & 0x3)
1869		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
1870
1871	return DBG_STATUS_OK;
1872}
1873
1874/* Reads data from NVRAM */
1875static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
1876				      struct qed_ptt *p_ptt,
1877				      u32 nvram_offset_bytes,
1878				      u32 nvram_size_bytes,
1879				      u32 *ret_buf,
1880				      bool b_can_sleep)
1881{
1882	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
1883	s32 bytes_left = nvram_size_bytes;
1884	u32 read_offset = 0, param = 0;
1885
1886	DP_VERBOSE(p_hwfn,
1887		   QED_MSG_DEBUG,
1888		   "nvram_read: reading image of size %d bytes from NVRAM\n",
1889		   nvram_size_bytes);
1890
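	/* The image is read in chunks of at most MCP_DRV_NVM_BUF_LEN bytes:
	 * each iteration encodes the current offset and chunk length into the
	 * mailbox param and advances by the number of bytes the MFW actually
	 * returned (ret_read_size).
	 */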
1891	do {
1892		bytes_to_copy =
1893		    (bytes_left >
1894		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
1895
1896		/* Call NVRAM read command */
1897		SET_MFW_FIELD(param,
1898			      DRV_MB_PARAM_NVM_OFFSET,
1899			      nvram_offset_bytes + read_offset);
1900		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
1901		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
1902				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
1903				       &ret_mcp_resp,
1904				       &ret_mcp_param, &ret_read_size,
1905				       (u32 *)((u8 *)ret_buf + read_offset),
1906				       b_can_sleep))
1907			return DBG_STATUS_NVRAM_READ_FAILED;
1908
1909		/* Check response */
1910		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
1911			return DBG_STATUS_NVRAM_READ_FAILED;
1912
1913		/* Update read offset */
1914		read_offset += ret_read_size;
1915		bytes_left -= ret_read_size;
1916	} while (bytes_left > 0);
1917
1918	return DBG_STATUS_OK;
1919}
1920
1921/* Dumps GRC registers section header. Returns the dumped size in dwords.
1922 * The following parameters are dumped:
1923 * - count: no. of dumped entries
1924 * - split_type: split type
1925 * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1926 * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1927 */
1928static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1929				 bool dump,
1930				 u32 num_reg_entries,
1931				 enum init_split_types split_type,
1932				 u8 split_id, const char *reg_type_name)
1933{
1934	u8 num_params = 2 +
1935	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1936	u32 offset = 0;
1937
1938	offset += qed_dump_section_hdr(dump_buf + offset,
1939				       dump, "grc_regs", num_params);
1940	offset += qed_dump_num_param(dump_buf + offset,
1941				     dump, "count", num_reg_entries);
1942	offset += qed_dump_str_param(dump_buf + offset,
1943				     dump, "split",
1944				     s_split_type_defs[split_type].name);
1945	if (split_type != SPLIT_TYPE_NONE)
1946		offset += qed_dump_num_param(dump_buf + offset,
1947					     dump, "id", split_id);
1948	if (reg_type_name)
1949		offset += qed_dump_str_param(dump_buf + offset,
1950					     dump, "type", reg_type_name);
1951
1952	return offset;
1953}
1954
1955/* Reads the specified registers into the specified buffer.
1956 * The addr and len arguments are specified in dwords.
1957 */
1958void qed_read_regs(struct qed_hwfn *p_hwfn,
1959		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1960{
1961	u32 i;
1962
1963	for (i = 0; i < len; i++)
1964		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1965}
1966
1967/* Dumps the GRC registers in the specified address range.
1968 * Returns the dumped size in dwords.
1969 * The addr and len arguments are specified in dwords.
1970 */
1971static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
1972				   struct qed_ptt *p_ptt,
1973				   u32 *dump_buf,
1974				   bool dump, u32 addr, u32 len, bool wide_bus,
1975				   enum init_split_types split_type,
1976				   u8 split_id)
1977{
1978	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1979	u8 port_id = 0, pf_id = 0, vf_id = 0;
1980	bool read_using_dmae = false;
1981	u32 thresh;
1982	u16 fid;
1983
1984	if (!dump)
1985		return len;
1986
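	/* Decode the split ID into port/PF/VF coordinates. For PORT_PF splits,
	 * split IDs are grouped by port and PF IDs are interleaved across
	 * ports: e.g. with 2 ports and 2 PFs per port, split_id 3 maps to
	 * port 1, pf 3.
	 */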
1987	switch (split_type) {
1988	case SPLIT_TYPE_PORT:
1989		port_id = split_id;
1990		break;
1991	case SPLIT_TYPE_PF:
1992		pf_id = split_id;
1993		break;
1994	case SPLIT_TYPE_PORT_PF:
1995		port_id = split_id / dev_data->num_pfs_per_port;
1996		pf_id = port_id + dev_data->num_ports *
1997		    (split_id % dev_data->num_pfs_per_port);
1998		break;
1999	case SPLIT_TYPE_VF:
2000		vf_id = split_id;
2001		break;
2002	default:
2003		break;
2004	}
2005
2006	/* Try reading using DMAE */
2007	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
2008	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
2009	     (PROTECT_WIDE_BUS && wide_bus))) {
2010		struct qed_dmae_params dmae_params;
2011
2012		/* Set DMAE params */
2013		memset(&dmae_params, 0, sizeof(dmae_params));
2014		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
2015		switch (split_type) {
2016		case SPLIT_TYPE_PORT:
2017			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
2018				  1);
2019			dmae_params.port_id = port_id;
2020			break;
2021		case SPLIT_TYPE_PF:
2022			SET_FIELD(dmae_params.flags,
2023				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
2024			dmae_params.src_pfid = pf_id;
2025			break;
2026		case SPLIT_TYPE_PORT_PF:
2027			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
2028				  1);
2029			SET_FIELD(dmae_params.flags,
2030				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
2031			dmae_params.port_id = port_id;
2032			dmae_params.src_pfid = pf_id;
2033			break;
2034		default:
2035			break;
2036		}
2037
2038		/* Execute DMAE command */
2039		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
2040						     p_ptt,
2041						     DWORDS_TO_BYTES(addr),
2042						     (u64)(uintptr_t)(dump_buf),
2043						     len, &dmae_params);
2044		if (!read_using_dmae) {
2045			dev_data->use_dmae = 0;
2046			DP_VERBOSE(p_hwfn,
2047				   QED_MSG_DEBUG,
2048				   "Failed reading from chip using DMAE, using GRC instead\n");
2049		}
2050	}
2051
2052	if (read_using_dmae)
2053		goto print_log;
2054
2055	/* If not read using DMAE, read using GRC */
2056
2057	/* Set pretend */
2058	if (split_type != dev_data->pretend.split_type ||
2059	    split_id != dev_data->pretend.split_id) {
2060		switch (split_type) {
2061		case SPLIT_TYPE_PORT:
2062			qed_port_pretend(p_hwfn, p_ptt, port_id);
2063			break;
2064		case SPLIT_TYPE_PF:
2065			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2066					  pf_id);
2067			qed_fid_pretend(p_hwfn, p_ptt, fid);
2068			break;
2069		case SPLIT_TYPE_PORT_PF:
2070			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2071					  pf_id);
2072			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2073			break;
2074		case SPLIT_TYPE_VF:
2075			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
2076			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
2077					  vf_id);
2078			qed_fid_pretend(p_hwfn, p_ptt, fid);
2079			break;
2080		default:
2081			break;
2082		}
2083
2084		dev_data->pretend.split_type = (u8)split_type;
2085		dev_data->pretend.split_id = split_id;
2086	}
2087
2088	/* Read registers using GRC */
2089	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2090
2091print_log:
2092	/* Print log */
2093	dev_data->num_regs_read += len;
2094	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
2095	if ((dev_data->num_regs_read / thresh) >
2096	    ((dev_data->num_regs_read - len) / thresh))
2097		DP_VERBOSE(p_hwfn,
2098			   QED_MSG_DEBUG,
2099			   "Dumped %d registers...\n", dev_data->num_regs_read);
2100
2101	return len;
2102}
2103
2104/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2105 * The addr and len arguments are specified in dwords.
2106 */
2107static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2108				      bool dump, u32 addr, u32 len)
2109{
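	/* The sequence header is a single dword: the GRC address (in dwords)
	 * in the low bits, and the range length (in dwords) shifted up by
	 * REG_DUMP_LEN_SHIFT.
	 */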
2110	if (dump)
2111		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2112
2113	return 1;
2114}
2115
2116/* Dumps GRC registers sequence. Returns the dumped size in dwords.
2117 * The addr and len arguments are specified in dwords.
2118 */
2119static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2120				  struct qed_ptt *p_ptt,
2121				  u32 *dump_buf,
2122				  bool dump, u32 addr, u32 len, bool wide_bus,
2123				  enum init_split_types split_type, u8 split_id)
2124{
2125	u32 offset = 0;
2126
2127	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2128	offset += qed_grc_dump_addr_range(p_hwfn,
2129					  p_ptt,
2130					  dump_buf + offset,
2131					  dump, addr, len, wide_bus,
2132					  split_type, split_id);
2133
2134	return offset;
2135}
2136
2137/* Dumps GRC registers sequence with skip cycle.
2138 * Returns the dumped size in dwords.
2139 * - addr:	start GRC address in dwords
2140 * - total_len:	total no. of dwords to dump
2141 * - read_len:	no. consecutive dwords to read
2142 * - skip_len:	no. of dwords to skip (and fill with zeros)
2143 */
2144static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2145				       struct qed_ptt *p_ptt,
2146				       u32 *dump_buf,
2147				       bool dump,
2148				       u32 addr,
2149				       u32 total_len,
2150				       u32 read_len, u32 skip_len)
2151{
2152	u32 offset = 0, reg_offset = 0;
2153
2154	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2155
2156	if (!dump)
2157		return offset + total_len;
2158
2159	while (reg_offset < total_len) {
2160		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2161
2162		offset += qed_grc_dump_addr_range(p_hwfn,
2163						  p_ptt,
2164						  dump_buf + offset,
2165						  dump,  addr, curr_len, false,
2166						  SPLIT_TYPE_NONE, 0);
2167		reg_offset += curr_len;
2168		addr += curr_len;
2169
2170		if (reg_offset < total_len) {
2171			curr_len = min_t(u32, skip_len, total_len - skip_len);
2172			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2173			offset += curr_len;
2174			reg_offset += curr_len;
2175			addr += curr_len;
2176		}
2177	}
2178
2179	return offset;
2180}
2181
2182/* Dumps GRC registers entries. Returns the dumped size in dwords. */
2183static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2184				     struct qed_ptt *p_ptt,
2185				     struct virt_mem_desc input_regs_arr,
2186				     u32 *dump_buf,
2187				     bool dump,
2188				     enum init_split_types split_type,
2189				     u8 split_id,
2190				     bool block_enable[MAX_BLOCK_ID],
2191				     u32 *num_dumped_reg_entries)
2192{
2193	u32 i, offset = 0, input_offset = 0;
2194	bool mode_match = true;
2195
2196	*num_dumped_reg_entries = 0;
2197
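	/* The input array is a sequence of dbg_dump_cond_hdr headers, each
	 * followed by data_size dwords of dbg_dump_reg entries. A header's
	 * entries are dumped only if its mode condition matches and its block
	 * is enabled.
	 */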
2198	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
2199		const struct dbg_dump_cond_hdr *cond_hdr =
2200		    (const struct dbg_dump_cond_hdr *)
2201		    input_regs_arr.ptr + input_offset++;
2202		u16 modes_buf_offset;
2203		bool eval_mode;
2204
2205		/* Check mode/block */
2206		eval_mode = GET_FIELD(cond_hdr->mode.data,
2207				      DBG_MODE_HDR_EVAL_MODE) > 0;
2208		if (eval_mode) {
2209			modes_buf_offset =
2210				GET_FIELD(cond_hdr->mode.data,
2211					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2212			mode_match = qed_is_mode_match(p_hwfn,
2213						       &modes_buf_offset);
2214		}
2215
2216		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2217			input_offset += cond_hdr->data_size;
2218			continue;
2219		}
2220
2221		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2222			const struct dbg_dump_reg *reg =
2223			    (const struct dbg_dump_reg *)
2224			    input_regs_arr.ptr + input_offset;
2225			u32 addr, len;
2226			bool wide_bus;
2227
2228			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2229			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2230			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2231			offset += qed_grc_dump_reg_entry(p_hwfn,
2232							 p_ptt,
2233							 dump_buf + offset,
2234							 dump,
2235							 addr,
2236							 len,
2237							 wide_bus,
2238							 split_type, split_id);
2239			(*num_dumped_reg_entries)++;
2240		}
2241	}
2242
2243	return offset;
2244}
2245
2246	/* Dumps GRC register entries for a single split. Returns the dumped size in dwords. */
2247static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2248				   struct qed_ptt *p_ptt,
2249				   struct virt_mem_desc input_regs_arr,
2250				   u32 *dump_buf,
2251				   bool dump,
2252				   bool block_enable[MAX_BLOCK_ID],
2253				   enum init_split_types split_type,
2254				   u8 split_id, const char *reg_type_name)
2255{
2256	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2257	enum init_split_types hdr_split_type = split_type;
2258	u32 num_dumped_reg_entries, offset;
2259	u8 hdr_split_id = split_id;
2260
2261	/* In PORT_PF split type, print a port split header */
2262	if (split_type == SPLIT_TYPE_PORT_PF) {
2263		hdr_split_type = SPLIT_TYPE_PORT;
2264		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2265	}
2266
2267	/* Calculate register dump header size (and skip it for now) */
2268	offset = qed_grc_dump_regs_hdr(dump_buf,
2269				       false,
2270				       0,
2271				       hdr_split_type,
2272				       hdr_split_id, reg_type_name);
2273
2274	/* Dump registers */
2275	offset += qed_grc_dump_regs_entries(p_hwfn,
2276					    p_ptt,
2277					    input_regs_arr,
2278					    dump_buf + offset,
2279					    dump,
2280					    split_type,
2281					    split_id,
2282					    block_enable,
2283					    &num_dumped_reg_entries);
2284
2285	/* Write register dump header */
2286	if (dump && num_dumped_reg_entries > 0)
2287		qed_grc_dump_regs_hdr(dump_buf,
2288				      dump,
2289				      num_dumped_reg_entries,
2290				      hdr_split_type,
2291				      hdr_split_id, reg_type_name);
2292
2293	return num_dumped_reg_entries > 0 ? offset : 0;
2294}
2295
2296/* Dumps registers according to the input registers array. Returns the dumped
2297 * size in dwords.
2298 */
2299static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2300				  struct qed_ptt *p_ptt,
2301				  u32 *dump_buf,
2302				  bool dump,
2303				  bool block_enable[MAX_BLOCK_ID],
2304				  const char *reg_type_name)
2305{
2306	struct virt_mem_desc *dbg_buf =
2307	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
2308	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2309	u32 offset = 0, input_offset = 0;
2310
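	/* The registers debug array is a sequence of dbg_dump_split_hdr
	 * headers, each followed by split_data_size dwords of register
	 * entries. Each split section is dumped once per split ID (port, PF,
	 * VF, etc.), pretending to that entity where needed.
	 */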
2311	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2312		const struct dbg_dump_split_hdr *split_hdr;
2313		struct virt_mem_desc curr_input_regs_arr;
2314		enum init_split_types split_type;
2315		u16 split_count = 0;
2316		u32 split_data_size;
2317		u8 split_id;
2318
2319		split_hdr =
2320		    (const struct dbg_dump_split_hdr *)
2321		    dbg_buf->ptr + input_offset++;
2322		split_type =
2323		    GET_FIELD(split_hdr->hdr,
2324			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2325		split_data_size = GET_FIELD(split_hdr->hdr,
2326					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2327		curr_input_regs_arr.ptr =
2328		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
2329		    input_offset;
2330		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
2331
2332		switch (split_type) {
2333		case SPLIT_TYPE_NONE:
2334			split_count = 1;
2335			break;
2336		case SPLIT_TYPE_PORT:
2337			split_count = dev_data->num_ports;
2338			break;
2339		case SPLIT_TYPE_PF:
2340		case SPLIT_TYPE_PORT_PF:
2341			split_count = dev_data->num_ports *
2342			    dev_data->num_pfs_per_port;
2343			break;
2344		case SPLIT_TYPE_VF:
2345			split_count = dev_data->num_vfs;
2346			break;
2347		default:
2348			return 0;
2349		}
2350
2351		for (split_id = 0; split_id < split_count; split_id++)
2352			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2353							  curr_input_regs_arr,
2354							  dump_buf + offset,
2355							  dump, block_enable,
2356							  split_type,
2357							  split_id,
2358							  reg_type_name);
2359
2360		input_offset += split_data_size;
2361	}
2362
2363	/* Cancel pretends (pretend to original PF) */
2364	if (dump) {
2365		qed_fid_pretend(p_hwfn, p_ptt,
2366				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2367					    p_hwfn->rel_pf_id));
2368		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2369		dev_data->pretend.split_id = 0;
2370	}
2371
2372	return offset;
2373}
2374
2375/* Dump reset registers. Returns the dumped size in dwords. */
2376static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2377				   struct qed_ptt *p_ptt,
2378				   u32 *dump_buf, bool dump)
2379{
2380	u32 offset = 0, num_regs = 0;
2381	u8 reset_reg_id;
2382
2383	/* Calculate header size */
2384	offset += qed_grc_dump_regs_hdr(dump_buf,
2385					false,
2386					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2387
2388	/* Write reset registers */
2389	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2390	     reset_reg_id++) {
2391		const struct dbg_reset_reg *reset_reg;
2392		u32 reset_reg_addr;
2393
2394		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2395
2396		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2397			continue;
2398
2399		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2400		offset += qed_grc_dump_reg_entry(p_hwfn,
2401						 p_ptt,
2402						 dump_buf + offset,
2403						 dump,
2404						 reset_reg_addr,
2405						 1, false, SPLIT_TYPE_NONE, 0);
2406		num_regs++;
2407	}
2408
2409	/* Write header */
2410	if (dump)
2411		qed_grc_dump_regs_hdr(dump_buf,
2412				      true, num_regs, SPLIT_TYPE_NONE,
2413				      0, "RESET_REGS");
2414
2415	return offset;
2416}
2417
2418/* Dump registers that are modified during GRC Dump and therefore must be
2419 * dumped first. Returns the dumped size in dwords.
2420 */
2421static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2422				      struct qed_ptt *p_ptt,
2423				      u32 *dump_buf, bool dump)
2424{
2425	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2426	u32 block_id, offset = 0, stall_regs_offset;
2427	const struct dbg_attn_reg *attn_reg_arr;
2428	u8 storm_id, reg_idx, num_attn_regs;
2429	u32 num_reg_entries = 0;
2430
2431	/* Write empty header for attention registers */
2432	offset += qed_grc_dump_regs_hdr(dump_buf,
2433					false,
2434					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2435
2436	/* Write parity registers */
2437	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
2438		if (dev_data->block_in_reset[block_id] && dump)
2439			continue;
2440
2441		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
2442						       (enum block_id)block_id,
2443						       ATTN_TYPE_PARITY,
2444						       &num_attn_regs);
2445
2446		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2447			const struct dbg_attn_reg *reg_data =
2448				&attn_reg_arr[reg_idx];
2449			u16 modes_buf_offset;
2450			bool eval_mode;
2451			u32 addr;
2452
2453			/* Check mode */
2454			eval_mode = GET_FIELD(reg_data->mode.data,
2455					      DBG_MODE_HDR_EVAL_MODE) > 0;
2456			modes_buf_offset =
2457				GET_FIELD(reg_data->mode.data,
2458					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2459			if (eval_mode &&
2460			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2461				continue;
2462
2463			/* Mode match: read & dump registers */
2464			addr = reg_data->mask_address;
2465			offset += qed_grc_dump_reg_entry(p_hwfn,
2466							 p_ptt,
2467							 dump_buf + offset,
2468							 dump,
2469							 addr,
2470							 1, false,
2471							 SPLIT_TYPE_NONE, 0);
2472			addr = GET_FIELD(reg_data->data,
2473					 DBG_ATTN_REG_STS_ADDRESS);
2474			offset += qed_grc_dump_reg_entry(p_hwfn,
2475							 p_ptt,
2476							 dump_buf + offset,
2477							 dump,
2478							 addr,
2479							 1, false,
2480							 SPLIT_TYPE_NONE, 0);
2481			num_reg_entries += 2;
2482		}
2483	}
2484
2485	/* Overwrite header for attention registers */
2486	if (dump)
2487		qed_grc_dump_regs_hdr(dump_buf,
2488				      true,
2489				      num_reg_entries,
2490				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2491
2492	/* Write empty header for stall registers */
2493	stall_regs_offset = offset;
2494	offset += qed_grc_dump_regs_hdr(dump_buf,
2495					false, 0, SPLIT_TYPE_NONE, 0, "REGS");
2496
2497	/* Write Storm stall status registers */
2498	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
2499	     storm_id++) {
2500		struct storm_defs *storm = &s_storm_defs[storm_id];
2501		u32 addr;
2502
2503		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
2504			continue;
2505
2506		addr =
2507		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
2508				    SEM_FAST_REG_STALLED);
2509		offset += qed_grc_dump_reg_entry(p_hwfn,
2510						 p_ptt,
2511						 dump_buf + offset,
2512						 dump,
2513						 addr,
2514						 1,
2515						 false, SPLIT_TYPE_NONE, 0);
2516		num_reg_entries++;
2517	}
2518
2519	/* Overwrite header for stall registers */
2520	if (dump)
2521		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
2522				      true,
2523				      num_reg_entries,
2524				      SPLIT_TYPE_NONE, 0, "REGS");
2525
2526	return offset;
2527}
2528
2529/* Dumps registers that can't be represented in the debug arrays */
2530static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2531				     struct qed_ptt *p_ptt,
2532				     u32 *dump_buf, bool dump)
2533{
2534	u32 offset = 0, addr;
2535
2536	offset += qed_grc_dump_regs_hdr(dump_buf,
2537					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2538
2539	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
2540	 * skipped).
2541	 */
2542	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2543	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2544					      p_ptt,
2545					      dump_buf + offset,
2546					      dump,
2547					      addr,
2548					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2549					      7,
2550					      1);
2551	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2552	offset +=
2553	    qed_grc_dump_reg_entry_skip(p_hwfn,
2554					p_ptt,
2555					dump_buf + offset,
2556					dump,
2557					addr,
2558					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2559					7,
2560					1);
2561
2562	return offset;
2563}
2564
2565/* Dumps a GRC memory header (section and params). Returns the dumped size in
2566 * dwords. The following parameters are dumped:
2567 * - name:	   dumped only if it's not NULL.
2568 * - addr:	   in dwords, dumped only if name is NULL.
2569 * - len:	   in dwords, always dumped.
2570 * - width:	   dumped if it's not zero.
2571 * - packed:	   dumped only if it's not false.
2572 * - mem_group:	   always dumped.
2573 * - is_storm:	   true only if the memory is related to a Storm.
2574 * - storm_letter: valid only if is_storm is true.
2575 *
2576 */
2577static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2578				u32 *dump_buf,
2579				bool dump,
2580				const char *name,
2581				u32 addr,
2582				u32 len,
2583				u32 bit_width,
2584				bool packed,
2585				const char *mem_group, char storm_letter)
2586{
2587	u8 num_params = 3;
2588	u32 offset = 0;
2589	char buf[64];
2590
2591	if (!len)
2592		DP_NOTICE(p_hwfn,
2593			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2594
2595	if (bit_width)
2596		num_params++;
2597	if (packed)
2598		num_params++;
2599
2600	/* Dump section header */
2601	offset += qed_dump_section_hdr(dump_buf + offset,
2602				       dump, "grc_mem", num_params);
2603
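	/* For Storm memories, the name and type strings below are prefixed
	 * with "<storm letter>STORM_" by overwriting the '?' placeholder in
	 * the temporary buffer.
	 */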
2604	if (name) {
2605		/* Dump name */
2606		if (storm_letter) {
2607			strcpy(buf, "?STORM_");
2608			buf[0] = storm_letter;
2609			strcpy(buf + strlen(buf), name);
2610		} else {
2611			strcpy(buf, name);
2612		}
2613
2614		offset += qed_dump_str_param(dump_buf + offset,
2615					     dump, "name", buf);
2616	} else {
2617		/* Dump address */
2618		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2619
2620		offset += qed_dump_num_param(dump_buf + offset,
2621					     dump, "addr", addr_in_bytes);
2622	}
2623
2624	/* Dump len */
2625	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2626
2627	/* Dump bit width */
2628	if (bit_width)
2629		offset += qed_dump_num_param(dump_buf + offset,
2630					     dump, "width", bit_width);
2631
2632	/* Dump packed */
2633	if (packed)
2634		offset += qed_dump_num_param(dump_buf + offset,
2635					     dump, "packed", 1);
2636
2637	/* Dump reg type */
2638	if (storm_letter) {
2639		strcpy(buf, "?STORM_");
2640		buf[0] = storm_letter;
2641		strcpy(buf + strlen(buf), mem_group);
2642	} else {
2643		strcpy(buf, mem_group);
2644	}
2645
2646	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2647
2648	return offset;
2649}
2650
2651/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2652 * Returns the dumped size in dwords.
2653 * The addr and len arguments are specified in dwords.
2654 */
2655static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2656			    struct qed_ptt *p_ptt,
2657			    u32 *dump_buf,
2658			    bool dump,
2659			    const char *name,
2660			    u32 addr,
2661			    u32 len,
2662			    bool wide_bus,
2663			    u32 bit_width,
2664			    bool packed,
2665			    const char *mem_group, char storm_letter)
2666{
2667	u32 offset = 0;
2668
2669	offset += qed_grc_dump_mem_hdr(p_hwfn,
2670				       dump_buf + offset,
2671				       dump,
2672				       name,
2673				       addr,
2674				       len,
2675				       bit_width,
2676				       packed, mem_group, storm_letter);
2677	offset += qed_grc_dump_addr_range(p_hwfn,
2678					  p_ptt,
2679					  dump_buf + offset,
2680					  dump, addr, len, wide_bus,
2681					  SPLIT_TYPE_NONE, 0);
2682
2683	return offset;
2684}
2685
2686/* Dumps GRC memories entries. Returns the dumped size in dwords. */
2687static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2688				    struct qed_ptt *p_ptt,
2689				    struct virt_mem_desc input_mems_arr,
2690				    u32 *dump_buf, bool dump)
2691{
2692	u32 i, offset = 0, input_offset = 0;
2693	bool mode_match = true;
2694
2695	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
2696		const struct dbg_dump_cond_hdr *cond_hdr;
2697		u16 modes_buf_offset;
2698		u32 num_entries;
2699		bool eval_mode;
2700
2701		cond_hdr =
2702		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
2703		    input_offset++;
2704		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2705
2706		/* Check required mode */
2707		eval_mode = GET_FIELD(cond_hdr->mode.data,
2708				      DBG_MODE_HDR_EVAL_MODE) > 0;
2709		if (eval_mode) {
2710			modes_buf_offset =
2711				GET_FIELD(cond_hdr->mode.data,
2712					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2713			mode_match = qed_is_mode_match(p_hwfn,
2714						       &modes_buf_offset);
2715		}
2716
2717		if (!mode_match) {
2718			input_offset += cond_hdr->data_size;
2719			continue;
2720		}
2721
2722		for (i = 0; i < num_entries;
2723		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2724			const struct dbg_dump_mem *mem =
2725			    (const struct dbg_dump_mem *)((u32 *)
2726							  input_mems_arr.ptr
2727							  + input_offset);
2728			const struct dbg_block *block;
2729			char storm_letter = 0;
2730			u32 mem_addr, mem_len;
2731			bool mem_wide_bus;
2732			u8 mem_group_id;
2733
2734			mem_group_id = GET_FIELD(mem->dword0,
2735						 DBG_DUMP_MEM_MEM_GROUP_ID);
2736			if (mem_group_id >= MEM_GROUPS_NUM) {
2737				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2738				return 0;
2739			}
2740
2741			if (!qed_grc_is_mem_included(p_hwfn,
2742						     (enum block_id)
2743						     cond_hdr->block_id,
2744						     mem_group_id))
2745				continue;
2746
2747			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
2748			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
2749			mem_wide_bus = GET_FIELD(mem->dword1,
2750						 DBG_DUMP_MEM_WIDE_BUS);
2751
2752			block = get_dbg_block(p_hwfn,
2753					      cond_hdr->block_id);
2754
2755			/* If memory is associated with Storm,
2756			 * update storm details
2757			 */
2758			if (block->associated_storm_letter)
2759				storm_letter = block->associated_storm_letter;
2760
2761			/* Dump memory */
2762			offset += qed_grc_dump_mem(p_hwfn,
2763						p_ptt,
2764						dump_buf + offset,
2765						dump,
2766						NULL,
2767						mem_addr,
2768						mem_len,
2769						mem_wide_bus,
2770						0,
2771						false,
2772						s_mem_group_names[mem_group_id],
2773						storm_letter);
2774		}
2775	}
2776
2777	return offset;
2778}
2779
2780/* Dumps GRC memories according to the input array dump_mem.
2781 * Returns the dumped size in dwords.
2782 */
2783static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2784				 struct qed_ptt *p_ptt,
2785				 u32 *dump_buf, bool dump)
2786{
2787	struct virt_mem_desc *dbg_buf =
2788	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
2789	u32 offset = 0, input_offset = 0;
2790
2791	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2792		const struct dbg_dump_split_hdr *split_hdr;
2793		struct virt_mem_desc curr_input_mems_arr;
2794		enum init_split_types split_type;
2795		u32 split_data_size;
2796
2797		split_hdr =
2798		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
2799		    input_offset++;
2800		split_type = GET_FIELD(split_hdr->hdr,
2801				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2802		split_data_size = GET_FIELD(split_hdr->hdr,
2803					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2804		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
2805		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
2806
2807		if (split_type == SPLIT_TYPE_NONE)
2808			offset += qed_grc_dump_mem_entries(p_hwfn,
2809							   p_ptt,
2810							   curr_input_mems_arr,
2811							   dump_buf + offset,
2812							   dump);
2813		else
2814			DP_NOTICE(p_hwfn,
2815				  "Dumping split memories is currently not supported\n");
2816
2817		input_offset += split_data_size;
2818	}
2819
2820	return offset;
2821}
2822
2823/* Dumps GRC context data for the specified Storm.
2824 * Returns the dumped size in dwords.
2825	 * The lid size (in quad-regs) is taken from the Storm defs per context type.
2826 */
2827static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2828				 struct qed_ptt *p_ptt,
2829				 u32 *dump_buf,
2830				 bool dump,
2831				 const char *name,
2832				 u32 num_lids,
2833				 enum cm_ctx_types ctx_type, u8 storm_id)
2834{
2835	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2836	struct storm_defs *storm = &s_storm_defs[storm_id];
2837	u32 i, lid, lid_size, total_size;
2838	u32 rd_reg_addr, offset = 0;
2839
2840	/* Convert quad-regs to dwords */
2841	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
2842
2843	if (!lid_size)
2844		return 0;
2845
2846	total_size = num_lids * lid_size;
2847
2848	offset += qed_grc_dump_mem_hdr(p_hwfn,
2849				       dump_buf + offset,
2850				       dump,
2851				       name,
2852				       0,
2853				       total_size,
2854				       lid_size * 32,
2855				       false, name, storm->letter);
2856
2857	if (!dump)
2858		return offset + total_size;
2859
2860	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
2861
2862	/* Dump context data */
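	/* Each iteration writes (i << 9) | lid to the Storm's CM context
	 * write register - apparently selecting dword i of local context ID
	 * lid - and then reads one dword back through cm_ctx_rd_addr.
	 */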
2863	for (lid = 0; lid < num_lids; lid++) {
2864		for (i = 0; i < lid_size; i++) {
2865			qed_wr(p_hwfn,
2866			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
2867			offset += qed_grc_dump_addr_range(p_hwfn,
2868							  p_ptt,
2869							  dump_buf + offset,
2870							  dump,
2871							  rd_reg_addr,
2872							  1,
2873							  false,
2874							  SPLIT_TYPE_NONE, 0);
2875		}
2876	}
2877
2878	return offset;
2879}
2880
2881/* Dumps GRC contexts. Returns the dumped size in dwords. */
2882static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2883			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2884{
2885	u32 offset = 0;
2886	u8 storm_id;
2887
2888	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2889		if (!qed_grc_is_storm_included(p_hwfn,
2890					       (enum dbg_storms)storm_id))
2891			continue;
2892
2893		/* Dump Conn AG context size */
2894		offset += qed_grc_dump_ctx_data(p_hwfn,
2895						p_ptt,
2896						dump_buf + offset,
2897						dump,
2898						"CONN_AG_CTX",
2899						NUM_OF_LCIDS,
2900						CM_CTX_CONN_AG, storm_id);
2901
2902		/* Dump Conn ST context size */
2903		offset += qed_grc_dump_ctx_data(p_hwfn,
2904						p_ptt,
2905						dump_buf + offset,
2906						dump,
2907						"CONN_ST_CTX",
2908						NUM_OF_LCIDS,
2909						CM_CTX_CONN_ST, storm_id);
2910
2911		/* Dump Task AG context size */
2912		offset += qed_grc_dump_ctx_data(p_hwfn,
2913						p_ptt,
2914						dump_buf + offset,
2915						dump,
2916						"TASK_AG_CTX",
2917						NUM_OF_LTIDS,
2918						CM_CTX_TASK_AG, storm_id);
2919
2920		/* Dump Task ST context size */
2921		offset += qed_grc_dump_ctx_data(p_hwfn,
2922						p_ptt,
2923						dump_buf + offset,
2924						dump,
2925						"TASK_ST_CTX",
2926						NUM_OF_LTIDS,
2927						CM_CTX_TASK_ST, storm_id);
2928	}
2929
2930	return offset;
2931}
2932
2933#define VFC_STATUS_RESP_READY_BIT	0
2934#define VFC_STATUS_BUSY_BIT		1
2935#define VFC_STATUS_SENDING_CMD_BIT	2
2936
2937#define VFC_POLLING_DELAY_MS	1
2938#define VFC_POLLING_COUNT		20
2939
2940/* Reads data from VFC. Returns the number of dwords read (0 on error).
2941 * Sizes are specified in dwords.
2942 */
2943static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
2944				      struct qed_ptt *p_ptt,
2945				      struct storm_defs *storm,
2946				      u32 *cmd_data,
2947				      u32 cmd_size,
2948				      u32 *addr_data,
2949				      u32 addr_size,
2950				      u32 resp_size, u32 *dump_buf)
2951{
2952	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2953	u32 vfc_status, polling_ms, polling_count = 0, i;
2954	u32 reg_addr, sem_base;
2955	bool is_ready = false;
2956
2957	sem_base = storm->sem_fast_mem_addr;
2958	polling_ms = VFC_POLLING_DELAY_MS *
2959	    s_hw_type_defs[dev_data->hw_type].delay_factor;
2960
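	/* VFC read sequence as implemented below: write the command dwords,
	 * write the address dwords, then for each response dword poll the VFC
	 * status register (up to VFC_POLLING_COUNT times) until the
	 * response-ready bit is set and read the dword from the data-read
	 * register.
	 */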
2961	/* Write VFC command */
2962	ARR_REG_WR(p_hwfn,
2963		   p_ptt,
2964		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
2965		   cmd_data, cmd_size);
2966
2967	/* Write VFC address */
2968	ARR_REG_WR(p_hwfn,
2969		   p_ptt,
2970		   sem_base + SEM_FAST_REG_VFC_ADDR,
2971		   addr_data, addr_size);
2972
2973	/* Read response */
2974	for (i = 0; i < resp_size; i++) {
2975		/* Poll until ready */
2976		do {
2977			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
2978			qed_grc_dump_addr_range(p_hwfn,
2979						p_ptt,
2980						&vfc_status,
2981						true,
2982						BYTES_TO_DWORDS(reg_addr),
2983						1,
2984						false, SPLIT_TYPE_NONE, 0);
2985			is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
2986
2987			if (!is_ready) {
2988				if (polling_count++ == VFC_POLLING_COUNT)
2989					return 0;
2990
2991				msleep(polling_ms);
2992			}
2993		} while (!is_ready);
2994
2995		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
2996		qed_grc_dump_addr_range(p_hwfn,
2997					p_ptt,
2998					dump_buf + i,
2999					true,
3000					BYTES_TO_DWORDS(reg_addr),
3001					1, false, SPLIT_TYPE_NONE, 0);
3002	}
3003
3004	return resp_size;
3005}
3006
3007/* Dump VFC CAM. Returns the dumped size in dwords. */
3008static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3009				struct qed_ptt *p_ptt,
3010				u32 *dump_buf, bool dump, u8 storm_id)
3011{
3012	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3013	struct storm_defs *storm = &s_storm_defs[storm_id];
3014	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3015	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3016	u32 row, offset = 0;
3017
3018	offset += qed_grc_dump_mem_hdr(p_hwfn,
3019				       dump_buf + offset,
3020				       dump,
3021				       "vfc_cam",
3022				       0,
3023				       total_size,
3024				       256,
3025				       false, "vfc_cam", storm->letter);
3026
3027	if (!dump)
3028		return offset + total_size;
3029
3030	/* Prepare CAM address */
3031	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3032
3033	/* Read VFC CAM data */
3034	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
3035		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3036		offset += qed_grc_dump_read_from_vfc(p_hwfn,
3037						     p_ptt,
3038						     storm,
3039						     cam_cmd,
3040						     VFC_CAM_CMD_DWORDS,
3041						     cam_addr,
3042						     VFC_CAM_ADDR_DWORDS,
3043						     VFC_CAM_RESP_DWORDS,
3044						     dump_buf + offset);
3045	}
3046
3047	return offset;
3048}
3049
3050/* Dump VFC RAM. Returns the dumped size in dwords. */
3051static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3052				struct qed_ptt *p_ptt,
3053				u32 *dump_buf,
3054				bool dump,
3055				u8 storm_id, struct vfc_ram_defs *ram_defs)
3056{
3057	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3058	struct storm_defs *storm = &s_storm_defs[storm_id];
3059	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3060	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3061	u32 row, offset = 0;
3062
3063	offset += qed_grc_dump_mem_hdr(p_hwfn,
3064				       dump_buf + offset,
3065				       dump,
3066				       ram_defs->mem_name,
3067				       0,
3068				       total_size,
3069				       256,
3070				       false,
3071				       ram_defs->type_name,
3072				       storm->letter);
3073
3074	if (!dump)
3075		return offset + total_size;
3076
3077	/* Prepare RAM address */
3078	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3079
3080	/* Read VFC RAM data */
3081	for (row = ram_defs->base_row;
3082	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
3083		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3084		offset += qed_grc_dump_read_from_vfc(p_hwfn,
3085						     p_ptt,
3086						     storm,
3087						     ram_cmd,
3088						     VFC_RAM_CMD_DWORDS,
3089						     ram_addr,
3090						     VFC_RAM_ADDR_DWORDS,
3091						     VFC_RAM_RESP_DWORDS,
3092						     dump_buf + offset);
3093	}
3094
3095	return offset;
3096}
3097
3098/* Dumps GRC VFC data. Returns the dumped size in dwords. */
3099static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3100			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3101{
3102	u8 storm_id, i;
3103	u32 offset = 0;
3104
3105	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3106		if (!qed_grc_is_storm_included(p_hwfn,
3107					       (enum dbg_storms)storm_id) ||
3108		    !s_storm_defs[storm_id].has_vfc)
3109			continue;
3110
3111		/* Read CAM */
3112		offset += qed_grc_dump_vfc_cam(p_hwfn,
3113					       p_ptt,
3114					       dump_buf + offset,
3115					       dump, storm_id);
3116
3117		/* Read RAM */
3118		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3119			offset += qed_grc_dump_vfc_ram(p_hwfn,
3120						       p_ptt,
3121						       dump_buf + offset,
3122						       dump,
3123						       storm_id,
3124						       &s_vfc_ram_defs[i]);
3125	}
3126
3127	return offset;
3128}
3129
3130/* Dumps GRC RSS data. Returns the dumped size in dwords. */
3131static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3132			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3133{
3134	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3135	u32 offset = 0;
3136	u8 rss_mem_id;
3137
3138	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3139		u32 rss_addr, num_entries, total_dwords;
3140		struct rss_mem_defs *rss_defs;
3141		u32 addr, num_dwords_to_read;
3142		bool packed;
3143
3144		rss_defs = &s_rss_mem_defs[rss_mem_id];
3145		rss_addr = rss_defs->addr;
3146		num_entries = rss_defs->num_entries[dev_data->chip_id];
3147		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3148		packed = (rss_defs->entry_width == 16);
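		/* total_dwords converts the table size from num_entries
		 * entries of entry_width bits into dwords. 16-bit entries are
		 * marked as packed (presumably two entries per dword).
		 */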
3149
3150		offset += qed_grc_dump_mem_hdr(p_hwfn,
3151					       dump_buf + offset,
3152					       dump,
3153					       rss_defs->mem_name,
3154					       0,
3155					       total_dwords,
3156					       rss_defs->entry_width,
3157					       packed,
3158					       rss_defs->type_name, 0);
3159
3160		/* Dump RSS data */
3161		if (!dump) {
3162			offset += total_dwords;
3163			continue;
3164		}
3165
3166		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3167		while (total_dwords) {
3168			num_dwords_to_read = min_t(u32,
3169						   RSS_REG_RSS_RAM_DATA_SIZE,
3170						   total_dwords);
3171			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3172			offset += qed_grc_dump_addr_range(p_hwfn,
3173							  p_ptt,
3174							  dump_buf + offset,
3175							  dump,
3176							  addr,
3177							  num_dwords_to_read,
3178							  false,
3179							  SPLIT_TYPE_NONE, 0);
3180			total_dwords -= num_dwords_to_read;
3181			rss_addr++;
3182		}
3183	}
3184
3185	return offset;
3186}
3187
3188/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3189static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3190				struct qed_ptt *p_ptt,
3191				u32 *dump_buf, bool dump, u8 big_ram_id)
3192{
3193	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3194	u32 block_size, ram_size, offset = 0, reg_val, i;
3195	char mem_name[12] = "???_BIG_RAM";
3196	char type_name[8] = "???_RAM";
3197	struct big_ram_defs *big_ram;
3198
3199	big_ram = &s_big_ram_defs[big_ram_id];
3200	ram_size = big_ram->ram_size[dev_data->chip_id];
3201
3202	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3203	block_size = reg_val &
3204		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3205									 : 128;
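	/* block_size is 256 or 128 bytes, depending on the chip's is_256b
	 * bit, and is reported in the memory header below as a bit width of
	 * block_size * 8.
	 */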
3206
3207	memcpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3208	memcpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3209
3210	/* Dump memory header */
3211	offset += qed_grc_dump_mem_hdr(p_hwfn,
3212				       dump_buf + offset,
3213				       dump,
3214				       mem_name,
3215				       0,
3216				       ram_size,
3217				       block_size * 8,
3218				       false, type_name, 0);
3219
3220	/* Read and dump Big RAM data */
3221	if (!dump)
3222		return offset + ram_size;
3223
3224	/* Dump Big RAM */
3225	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3226	     i++) {
3227		u32 addr, len;
3228
3229		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3230		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3231		len = BRB_REG_BIG_RAM_DATA_SIZE;
3232		offset += qed_grc_dump_addr_range(p_hwfn,
3233						  p_ptt,
3234						  dump_buf + offset,
3235						  dump,
3236						  addr,
3237						  len,
3238						  false, SPLIT_TYPE_NONE, 0);
3239	}
3240
3241	return offset;
3242}
3243
3244/* Dumps MCP scratchpad. Returns the dumped size in dwords. */
3245static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3246			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3247{
3248	bool block_enable[MAX_BLOCK_ID] = { 0 };
3249	u32 offset = 0, addr;
3250	bool halted = false;
3251
3252	/* Halt MCP */
3253	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3254		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3255		if (!halted)
3256			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3257	}
3258
3259	/* Dump MCP scratchpad */
3260	offset += qed_grc_dump_mem(p_hwfn,
3261				   p_ptt,
3262				   dump_buf + offset,
3263				   dump,
3264				   NULL,
3265				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3266				   MCP_REG_SCRATCH_SIZE,
3267				   false, 0, false, "MCP", 0);
3268
3269	/* Dump MCP cpu_reg_file */
3270	offset += qed_grc_dump_mem(p_hwfn,
3271				   p_ptt,
3272				   dump_buf + offset,
3273				   dump,
3274				   NULL,
3275				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3276				   MCP_REG_CPU_REG_FILE_SIZE,
3277				   false, 0, false, "MCP", 0);
3278
3279	/* Dump MCP registers */
3280	block_enable[BLOCK_MCP] = true;
3281	offset += qed_grc_dump_registers(p_hwfn,
3282					 p_ptt,
3283					 dump_buf + offset,
3284					 dump, block_enable, "MCP");
3285
3286	/* Dump required non-MCP registers */
3287	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3288					dump, 1, SPLIT_TYPE_NONE, 0,
3289					"MCP");
3290	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3291	offset += qed_grc_dump_reg_entry(p_hwfn,
3292					 p_ptt,
3293					 dump_buf + offset,
3294					 dump,
3295					 addr,
3296					 1,
3297					 false, SPLIT_TYPE_NONE, 0);
3298
3299	/* Release MCP */
3300	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3301		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3302
3303	return offset;
3304}
3305
3306/* Dumps the tbus indirect memory for all PHYs.
3307 * Returns the dumped size in dwords.
3308 */
3309static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3310			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3311{
3312	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3313	char mem_name[32];
3314	u8 phy_id;
3315
3316	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3317		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3318		struct phy_defs *phy_defs;
3319		u8 *bytes_buf;
3320
3321		phy_defs = &s_phy_defs[phy_id];
3322		addr_lo_addr = phy_defs->base_addr +
3323			       phy_defs->tbus_addr_lo_addr;
3324		addr_hi_addr = phy_defs->base_addr +
3325			       phy_defs->tbus_addr_hi_addr;
3326		data_lo_addr = phy_defs->base_addr +
3327			       phy_defs->tbus_data_lo_addr;
3328		data_hi_addr = phy_defs->base_addr +
3329			       phy_defs->tbus_data_hi_addr;
3330
3331		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3332			     phy_defs->phy_name) < 0)
3333			DP_NOTICE(p_hwfn,
3334				  "Unexpected debug error: invalid PHY memory name\n");
3335
3336		offset += qed_grc_dump_mem_hdr(p_hwfn,
3337					       dump_buf + offset,
3338					       dump,
3339					       mem_name,
3340					       0,
3341					       PHY_DUMP_SIZE_DWORDS,
3342					       16, true, mem_name, 0);
3343
3344		if (!dump) {
3345			offset += PHY_DUMP_SIZE_DWORDS;
3346			continue;
3347		}
3348
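		/* Indirect tbus read: select each address by writing its high
		 * and low bytes to the address registers, then read one byte
		 * from each of the low/high data registers and store them
		 * consecutively in the dump buffer.
		 */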
3349		bytes_buf = (u8 *)(dump_buf + offset);
3350		for (tbus_hi_offset = 0;
3351		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3352		     tbus_hi_offset++) {
3353			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3354			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3355			     tbus_lo_offset++) {
3356				qed_wr(p_hwfn,
3357				       p_ptt, addr_lo_addr, tbus_lo_offset);
3358				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3359							    p_ptt,
3360							    data_lo_addr);
3361				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3362							    p_ptt,
3363							    data_hi_addr);
3364			}
3365		}
3366
3367		offset += PHY_DUMP_SIZE_DWORDS;
3368	}
3369
3370	return offset;
3371}
3372
3373/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
3374static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3375				    struct qed_ptt *p_ptt,
3376				    u32 *dump_buf, bool dump)
3377{
3378	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3379	u32 hw_dump_size_dwords = 0, offset = 0;
3380	enum dbg_status status;
3381
3382	/* Read HW dump image from NVRAM */
3383	status = qed_find_nvram_image(p_hwfn,
3384				      p_ptt,
3385				      NVM_TYPE_HW_DUMP_OUT,
3386				      &hw_dump_offset_bytes,
3387				      &hw_dump_size_bytes,
3388				      false);
3389	if (status != DBG_STATUS_OK)
3390		return 0;
3391
3392	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3393
3394	/* Dump HW dump image section */
3395	offset += qed_dump_section_hdr(dump_buf + offset,
3396				       dump, "mcp_hw_dump", 1);
3397	offset += qed_dump_num_param(dump_buf + offset,
3398				     dump, "size", hw_dump_size_dwords);
3399
3400	/* Read MCP HW dump image into dump buffer */
3401	if (dump && hw_dump_size_dwords) {
3402		status = qed_nvram_read(p_hwfn,
3403					p_ptt,
3404					hw_dump_offset_bytes,
3405					hw_dump_size_bytes,
3406					dump_buf + offset,
3407					false);
3408		if (status != DBG_STATUS_OK) {
3409			DP_NOTICE(p_hwfn,
3410				  "Failed to read MCP HW Dump image from NVRAM\n");
3411			return 0;
3412		}
3413	}
3414	offset += hw_dump_size_dwords;
3415
3416	return offset;
3417}
3418
3419/* Dumps Static Debug data. Returns the dumped size in dwords. */
3420static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3421				     struct qed_ptt *p_ptt,
3422				     u32 *dump_buf, bool dump)
3423{
3424	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3425	u32 block_id, line_id, offset = 0, addr, len;
3426
3427	/* Don't dump static debug if a debug bus recording is in progress */
3428	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3429		return 0;
3430
3431	if (dump) {
3432		/* Disable debug bus in all blocks */
3433		qed_bus_disable_blocks(p_hwfn, p_ptt);
3434
3435		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3436		qed_wr(p_hwfn,
3437		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
3438		qed_wr(p_hwfn,
3439		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3440		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3441		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3442	}
3443
3444	/* Dump all static debug lines for each relevant block */
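	/* For each block that has a debug bus: enable the block's debug
	 * client, select every debug line in turn, and read
	 * STATIC_DEBUG_LINE_DWORDS dwords per line from
	 * DBG_REG_CALENDAR_OUT_DATA. Blocks held in reset are dumped as
	 * zeros.
	 */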
3445	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3446		const struct dbg_block_chip *block_per_chip;
3447		const struct dbg_block *block;
3448		bool is_removed, has_dbg_bus;
3449		u16 modes_buf_offset;
3450		u32 block_dwords;
3451
3452		block_per_chip =
3453		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
3454		is_removed = GET_FIELD(block_per_chip->flags,
3455				       DBG_BLOCK_CHIP_IS_REMOVED);
3456		has_dbg_bus = GET_FIELD(block_per_chip->flags,
3457					DBG_BLOCK_CHIP_HAS_DBG_BUS);
3458
3459		if (!is_removed && has_dbg_bus &&
3460		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
3461			      DBG_MODE_HDR_EVAL_MODE) > 0) {
3462			modes_buf_offset =
3463			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
3464				      DBG_MODE_HDR_MODES_BUF_OFFSET);
3465			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
3466				has_dbg_bus = false;
3467		}
3468
3469		if (is_removed || !has_dbg_bus)
3470			continue;
3471
3472		block_dwords = NUM_DBG_LINES(block_per_chip) *
3473			       STATIC_DEBUG_LINE_DWORDS;
3474
3475		/* Dump static section params */
3476		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
3477		offset += qed_grc_dump_mem_hdr(p_hwfn,
3478					       dump_buf + offset,
3479					       dump,
3480					       block->name,
3481					       0,
3482					       block_dwords,
3483					       32, false, "STATIC", 0);
3484
3485		if (!dump) {
3486			offset += block_dwords;
3487			continue;
3488		}
3489
3490		/* If all lines are invalid - dump zeros */
3491		if (dev_data->block_in_reset[block_id]) {
3492			memset(dump_buf + offset, 0,
3493			       DWORDS_TO_BYTES(block_dwords));
3494			offset += block_dwords;
3495			continue;
3496		}
3497
3498		/* Enable block's client */
3499		qed_bus_enable_clients(p_hwfn,
3500				       p_ptt,
3501				       BIT(block_per_chip->dbg_client_id));
3502
3503		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3504		len = STATIC_DEBUG_LINE_DWORDS;
3505		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
3506		     line_id++) {
3507			/* Configure debug line ID */
3508			qed_bus_config_dbg_line(p_hwfn,
3509						p_ptt,
3510						(enum block_id)block_id,
3511						(u8)line_id, 0xf, 0, 0, 0);
3512
3513			/* Read debug line info */
3514			offset += qed_grc_dump_addr_range(p_hwfn,
3515							  p_ptt,
3516							  dump_buf + offset,
3517							  dump,
3518							  addr,
3519							  len,
3520							  true, SPLIT_TYPE_NONE,
3521							  0);
3522		}
3523
3524		/* Disable block's client and debug output */
3525		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3526		qed_bus_config_dbg_line(p_hwfn, p_ptt,
3527					(enum block_id)block_id, 0, 0, 0, 0, 0);
3528	}
3529
3530	if (dump) {
3531		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3532		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3533	}
3534
3535	return offset;
3536}
3537
3538/* Performs GRC Dump to the specified buffer.
3539 * Returns the dumped size in dwords.
3540 */
3541static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3542				    struct qed_ptt *p_ptt,
3543				    u32 *dump_buf,
3544				    bool dump, u32 *num_dumped_dwords)
3545{
3546	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3547	bool parities_masked = false;
3548	u32 dwords_read, offset = 0;
3549	u8 i;
3550
3551	*num_dumped_dwords = 0;
3552	dev_data->num_regs_read = 0;
3553
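	/* Dump ordering below matters: reset registers are captured before
	 * blocks are taken out of reset, registers that the dump flow itself
	 * modifies (parity masks, stall status) are captured before being
	 * changed, and Storms are stalled before IOR/VFC data is read,
	 * presumably so the captured state stays consistent.
	 */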
3554	/* Update reset state */
3555	if (dump)
3556		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3557
3558	/* Dump global params */
3559	offset += qed_dump_common_global_params(p_hwfn,
3560						p_ptt,
3561						dump_buf + offset, dump, 4);
3562	offset += qed_dump_str_param(dump_buf + offset,
3563				     dump, "dump-type", "grc-dump");
3564	offset += qed_dump_num_param(dump_buf + offset,
3565				     dump,
3566				     "num-lcids",
3567				     NUM_OF_LCIDS);
3568	offset += qed_dump_num_param(dump_buf + offset,
3569				     dump,
3570				     "num-ltids",
3571				     NUM_OF_LTIDS);
3572	offset += qed_dump_num_param(dump_buf + offset,
3573				     dump, "num-ports", dev_data->num_ports);
3574
3575	/* Dump reset registers (dumped before taking blocks out of reset) */
3576	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3577		offset += qed_grc_dump_reset_regs(p_hwfn,
3578						  p_ptt,
3579						  dump_buf + offset, dump);
3580
3581	/* Take all blocks out of reset (using reset registers) */
3582	if (dump) {
3583		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3584		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3585	}
3586
3587	/* Disable all parities using MFW command */
3588	if (dump &&
3589	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3590		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3591		if (!parities_masked) {
3592			DP_NOTICE(p_hwfn,
3593				  "Failed to mask parities using MFW\n");
3594			if (qed_grc_get_param
3595			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3596				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3597		}
3598	}
3599
3600	/* Dump modified registers (dumped before modifying them) */
3601	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3602		offset += qed_grc_dump_modified_regs(p_hwfn,
3603						     p_ptt,
3604						     dump_buf + offset, dump);
3605
3606	/* Stall storms */
3607	if (dump &&
3608	    (qed_grc_is_included(p_hwfn,
3609				 DBG_GRC_PARAM_DUMP_IOR) ||
3610	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3611		qed_grc_stall_storms(p_hwfn, p_ptt, true);
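	/* Note: the Storms remain stalled for the rest of the dump and are
	 * only released near its end, and only if the UNSTALL GRC param is
	 * set (see the unstall handling before the return below).
	 */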
3612
3613	/* Dump all regs  */
3614	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3615		bool block_enable[MAX_BLOCK_ID];
3616
3617		/* Dump all blocks except MCP */
3618		for (i = 0; i < MAX_BLOCK_ID; i++)
3619			block_enable[i] = true;
3620		block_enable[BLOCK_MCP] = false;
3621		offset += qed_grc_dump_registers(p_hwfn,
3622						 p_ptt,
3623						 dump_buf +
3624						 offset,
3625						 dump,
3626						 block_enable, NULL);
3627
3628		/* Dump special registers */
3629		offset += qed_grc_dump_special_regs(p_hwfn,
3630						    p_ptt,
3631						    dump_buf + offset, dump);
3632	}
3633
3634	/* Dump memories */
3635	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3636
3637	/* Dump MCP */
3638	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3639		offset += qed_grc_dump_mcp(p_hwfn,
3640					   p_ptt, dump_buf + offset, dump);
3641
3642	/* Dump context */
3643	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3644		offset += qed_grc_dump_ctx(p_hwfn,
3645					   p_ptt, dump_buf + offset, dump);
3646
3647	/* Dump RSS memories */
3648	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3649		offset += qed_grc_dump_rss(p_hwfn,
3650					   p_ptt, dump_buf + offset, dump);
3651
3652	/* Dump Big RAM */
3653	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3654		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3655			offset += qed_grc_dump_big_ram(p_hwfn,
3656						       p_ptt,
3657						       dump_buf + offset,
3658						       dump, i);
3659
3660	/* Dump VFC */
3661	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3662		dwords_read = qed_grc_dump_vfc(p_hwfn,
3663					       p_ptt, dump_buf + offset, dump);
3664		offset += dwords_read;
3665		if (!dwords_read)
3666			return DBG_STATUS_VFC_READ_ERROR;
3667	}
3668
3669	/* Dump PHY tbus */
3670	if (qed_grc_is_included(p_hwfn,
3671				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3672	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3673		offset += qed_grc_dump_phy(p_hwfn,
3674					   p_ptt, dump_buf + offset, dump);
3675
3676	/* Dump MCP HW Dump */
3677	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3678	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
3679		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3680						   p_ptt,
3681						   dump_buf + offset, dump);
3682
3683	/* Dump static debug data (only if not during debug bus recording) */
3684	if (qed_grc_is_included(p_hwfn,
3685				DBG_GRC_PARAM_DUMP_STATIC) &&
3686	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3687		offset += qed_grc_dump_static_debug(p_hwfn,
3688						    p_ptt,
3689						    dump_buf + offset, dump);
3690
3691	/* Dump last section */
3692	offset += qed_dump_last_section(dump_buf, offset, dump);
3693
3694	if (dump) {
3695		/* Unstall storms */
3696		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3697			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3698
3699		/* Clear parity status */
3700		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3701
3702		/* Enable all parities using MFW command */
3703		if (parities_masked)
3704			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3705	}
3706
3707	*num_dumped_dwords = offset;
3708
3709	return DBG_STATUS_OK;
3710}
3711
3712/* Writes the specified failing Idle Check rule to the specified buffer.
3713 * Returns the dumped size in dwords.
3714 */
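/* Record layout note: a dbg_idle_chk_result_hdr, then a
 * dbg_idle_chk_result_reg_hdr plus the register values for each condition
 * register, and finally the same header/values pair for each info register
 * that could be read (block not in reset and mode matching).
 */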
3715static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3716				     struct qed_ptt *p_ptt,
3717				     u32 *dump_buf,
3718				     bool dump,
3719				     u16 rule_id,
3720				     const struct dbg_idle_chk_rule *rule,
3721				     u16 fail_entry_id, u32 *cond_reg_values)
3722{
3723	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3724	const struct dbg_idle_chk_cond_reg *cond_regs;
3725	const struct dbg_idle_chk_info_reg *info_regs;
3726	u32 i, next_reg_offset = 0, offset = 0;
3727	struct dbg_idle_chk_result_hdr *hdr;
3728	const union dbg_idle_chk_reg *regs;
3729	u8 reg_id;
3730
3731	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
3732	regs = (const union dbg_idle_chk_reg *)
3733		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3734		rule->reg_offset;
3735	cond_regs = &regs[0].cond_reg;
3736	info_regs = &regs[rule->num_cond_regs].info_reg;
3737
3738	/* Dump rule data */
3739	if (dump) {
3740		memset(hdr, 0, sizeof(*hdr));
3741		hdr->rule_id = rule_id;
3742		hdr->mem_entry_id = fail_entry_id;
3743		hdr->severity = rule->severity;
3744		hdr->num_dumped_cond_regs = rule->num_cond_regs;
3745	}
3746
3747	offset += IDLE_CHK_RESULT_HDR_DWORDS;
3748
3749	/* Dump condition register values */
3750	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3751		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3752		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3753
3754		reg_hdr =
3755		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
3756
3757		/* Write register header */
3758		if (!dump) {
3759			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3760			    reg->entry_size;
3761			continue;
3762		}
3763
3764		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3765		memset(reg_hdr, 0, sizeof(*reg_hdr));
3766		reg_hdr->start_entry = reg->start_entry;
3767		reg_hdr->size = reg->entry_size;
3768		SET_FIELD(reg_hdr->data,
3769			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3770			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
3771		SET_FIELD(reg_hdr->data,
3772			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3773
3774		/* Write register values */
3775		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
3776			dump_buf[offset] = cond_reg_values[next_reg_offset];
3777	}
3778
3779	/* Dump info register values */
3780	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3781		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3782		u32 block_id;
3783
3784		/* Check if register's block is in reset */
3785		if (!dump) {
3786			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3787			continue;
3788		}
3789
3790		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3791		if (block_id >= MAX_BLOCK_ID) {
3792			DP_NOTICE(p_hwfn, "Invalid block_id\n");
3793			return 0;
3794		}
3795
3796		if (!dev_data->block_in_reset[block_id]) {
3797			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3798			bool wide_bus, eval_mode, mode_match = true;
3799			u16 modes_buf_offset;
3800			u32 addr;
3801
3802			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
3803				  (dump_buf + offset);
3804
3805			/* Check mode */
3806			eval_mode = GET_FIELD(reg->mode.data,
3807					      DBG_MODE_HDR_EVAL_MODE) > 0;
3808			if (eval_mode) {
3809				modes_buf_offset =
3810				    GET_FIELD(reg->mode.data,
3811					      DBG_MODE_HDR_MODES_BUF_OFFSET);
3812				mode_match =
3813					qed_is_mode_match(p_hwfn,
3814							  &modes_buf_offset);
3815			}
3816
3817			if (!mode_match)
3818				continue;
3819
3820			addr = GET_FIELD(reg->data,
3821					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3822			wide_bus = GET_FIELD(reg->data,
3823					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
3824
3825			/* Write register header */
3826			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3827			hdr->num_dumped_info_regs++;
3828			memset(reg_hdr, 0, sizeof(*reg_hdr));
3829			reg_hdr->size = reg->size;
3830			SET_FIELD(reg_hdr->data,
3831				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3832				  rule->num_cond_regs + reg_id);
3833
3834			/* Write register values */
3835			offset += qed_grc_dump_addr_range(p_hwfn,
3836							  p_ptt,
3837							  dump_buf + offset,
3838							  dump,
3839							  addr,
3840							  reg->size, wide_bus,
3841							  SPLIT_TYPE_NONE, 0);
3842		}
3843	}
3844
3845	return offset;
3846}
3847
3848/* Dumps idle check rule entries. Returns the dumped size in dwords. */
3849static u32
3850qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3851			       u32 *dump_buf, bool dump,
3852			       const struct dbg_idle_chk_rule *input_rules,
3853			       u32 num_input_rules, u32 *num_failing_rules)
3854{
3855	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3856	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3857	u32 i, offset = 0;
3858	u16 entry_id;
3859	u8 reg_id;
3860
3861	*num_failing_rules = 0;
3862
3863	for (i = 0; i < num_input_rules; i++) {
3864		const struct dbg_idle_chk_cond_reg *cond_regs;
3865		const struct dbg_idle_chk_rule *rule;
3866		const union dbg_idle_chk_reg *regs;
3867		u16 num_reg_entries = 1;
3868		bool check_rule = true;
3869		const u32 *imm_values;
3870
3871		rule = &input_rules[i];
3872		regs = (const union dbg_idle_chk_reg *)
3873			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3874			rule->reg_offset;
3875		cond_regs = &regs[0].cond_reg;
3876		imm_values =
3877		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3878		    rule->imm_offset;
3879
3880		/* Check if all condition register blocks are out of reset, and
3881		 * find maximal number of entries (all condition registers that
3882		 * are memories must have the same size, which is > 1).
3883		 */
3884		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3885		     reg_id++) {
3886			u32 block_id =
3887				GET_FIELD(cond_regs[reg_id].data,
3888					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3889
3890			if (block_id >= MAX_BLOCK_ID) {
3891				DP_NOTICE(p_hwfn, "Invalid block_id\n");
3892				return 0;
3893			}
3894
3895			check_rule = !dev_data->block_in_reset[block_id];
3896			if (cond_regs[reg_id].num_entries > num_reg_entries)
3897				num_reg_entries = cond_regs[reg_id].num_entries;
3898		}
3899
3900		if (!check_rule && dump)
3901			continue;
3902
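		/* Size-estimation pass: assume the worst case in which every
		 * register entry of this rule fails, and reserve one failure
		 * record per entry.
		 */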
3903		if (!dump) {
3904			u32 entry_dump_size =
3905				qed_idle_chk_dump_failure(p_hwfn,
3906							  p_ptt,
3907							  dump_buf + offset,
3908							  false,
3909							  rule->rule_id,
3910							  rule,
3911							  0,
3912							  NULL);
3913
3914			offset += num_reg_entries * entry_dump_size;
3915			(*num_failing_rules) += num_reg_entries;
3916			continue;
3917		}
3918
3919		/* Go over all register entries (number of entries is the same
3920		 * for all condition registers).
3921		 */
3922		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3923			u32 next_reg_offset = 0;
3924
3925			/* Read current entry of all condition registers */
3926			for (reg_id = 0; reg_id < rule->num_cond_regs;
3927			     reg_id++) {
3928				const struct dbg_idle_chk_cond_reg *reg =
3929					&cond_regs[reg_id];
3930				u32 padded_entry_size, addr;
3931				bool wide_bus;
3932
3933				/* Find GRC address (if it's a memory, the
3934				 * address of the specific entry is calculated).
3935				 */
3936				addr = GET_FIELD(reg->data,
3937						 DBG_IDLE_CHK_COND_REG_ADDRESS);
3938				wide_bus =
3939				    GET_FIELD(reg->data,
3940					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
3941				if (reg->num_entries > 1 ||
3942				    reg->start_entry > 0) {
3943					padded_entry_size =
3944					   reg->entry_size > 1 ?
3945					   roundup_pow_of_two(reg->entry_size) :
3946					   1;
3947					addr += (reg->start_entry + entry_id) *
3948						padded_entry_size;
3949				}
3950
3951				/* Read registers */
3952				if (next_reg_offset + reg->entry_size >=
3953				    IDLE_CHK_MAX_ENTRIES_SIZE) {
3954					DP_NOTICE(p_hwfn,
3955						  "idle check registers entry is too large\n");
3956					return 0;
3957				}
3958
3959				next_reg_offset +=
3960				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
3961							    cond_reg_values +
3962							    next_reg_offset,
3963							    dump, addr,
3964							    reg->entry_size,
3965							    wide_bus,
3966							    SPLIT_TYPE_NONE, 0);
3967			}
3968
3969			/* Call rule condition function.
3970			 * If returns true, it's a failure.
3971			 */
3972			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3973							imm_values)) {
3974				offset += qed_idle_chk_dump_failure(p_hwfn,
3975							p_ptt,
3976							dump_buf + offset,
3977							dump,
3978							rule->rule_id,
3979							rule,
3980							entry_id,
3981							cond_reg_values);
3982				(*num_failing_rules)++;
3983			}
3984		}
3985	}
3986
3987	return offset;
3988}
3989
3990/* Performs Idle Check Dump to the specified buffer.
3991 * Returns the dumped size in dwords.
3992 */
3993static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3994			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3995{
3996	struct virt_mem_desc *dbg_buf =
3997	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
3998	u32 num_failing_rules_offset, offset = 0,
3999	    input_offset = 0, num_failing_rules = 0;
4000
4001	/* Dump global params - the "1" must match the number of params below */
4002	offset += qed_dump_common_global_params(p_hwfn,
4003						p_ptt,
4004						dump_buf + offset, dump, 1);
4005	offset += qed_dump_str_param(dump_buf + offset,
4006				     dump, "dump-type", "idle-chk");
4007
4008	/* Dump idle check section header with a single parameter */
4009	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4010	num_failing_rules_offset = offset;
4011	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4012
4013	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
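		/* Each record in the rules buffer is a one-dword
		 * dbg_idle_chk_cond_hdr followed by cond_hdr->data_size dwords
		 * of dbg_idle_chk_rule entries.
		 */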
4014		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4015		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
4016		    input_offset++;
4017		bool eval_mode, mode_match = true;
4018		u32 curr_failing_rules;
4019		u16 modes_buf_offset;
4020
4021		/* Check mode */
4022		eval_mode = GET_FIELD(cond_hdr->mode.data,
4023				      DBG_MODE_HDR_EVAL_MODE) > 0;
4024		if (eval_mode) {
4025			modes_buf_offset =
4026				GET_FIELD(cond_hdr->mode.data,
4027					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4028			mode_match = qed_is_mode_match(p_hwfn,
4029						       &modes_buf_offset);
4030		}
4031
4032		if (mode_match) {
4033			const struct dbg_idle_chk_rule *rule =
4034			    (const struct dbg_idle_chk_rule *)((u32 *)
4035							       dbg_buf->ptr
4036							       + input_offset);
4037			u32 num_input_rules =
4038				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
4039			offset +=
4040			    qed_idle_chk_dump_rule_entries(p_hwfn,
4041							   p_ptt,
4042							   dump_buf +
4043							   offset,
4044							   dump,
4045							   rule,
4046							   num_input_rules,
4047							   &curr_failing_rules);
4048			num_failing_rules += curr_failing_rules;
4049		}
4050
4051		input_offset += cond_hdr->data_size;
4052	}
4053
4054	/* Overwrite num_rules parameter */
4055	if (dump)
4056		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4057				   dump, "num_rules", num_failing_rules);
4058
4059	/* Dump last section */
4060	offset += qed_dump_last_section(dump_buf, offset, dump);
4061
4062	return offset;
4063}
4064
4065/* Get info on the MCP Trace data in the scratchpad:
4066 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4067 * - trace_data_size (OUT): trace data size in bytes (without the header)
4068 */
4069static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4070						   struct qed_ptt *p_ptt,
4071						   u32 *trace_data_grc_addr,
4072						   u32 *trace_data_size)
4073{
4074	u32 spad_trace_offsize, signature;
4075
4076	/* Read trace section offsize structure from MCP scratchpad */
4077	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4078
4079	/* Extract trace section address from offsize (in scratchpad) */
4080	*trace_data_grc_addr =
4081		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4082
4083	/* Read signature from MCP trace section */
4084	signature = qed_rd(p_hwfn, p_ptt,
4085			   *trace_data_grc_addr +
4086			   offsetof(struct mcp_trace, signature));
4087
4088	if (signature != MFW_TRACE_SIGNATURE)
4089		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4090
4091	/* Read trace size from MCP trace section */
4092	*trace_data_size = qed_rd(p_hwfn,
4093				  p_ptt,
4094				  *trace_data_grc_addr +
4095				  offsetof(struct mcp_trace, size));
4096
4097	return DBG_STATUS_OK;
4098}
4099
4100/* Reads MCP trace meta data image from NVRAM
4101 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4102 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4103 *			      loaded from file).
4104 * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4105 */
4106static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4107						   struct qed_ptt *p_ptt,
4108						   u32 trace_data_size_bytes,
4109						   u32 *running_bundle_id,
4110						   u32 *trace_meta_offset,
4111						   u32 *trace_meta_size)
4112{
4113	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4114
4115	/* Read MCP trace section offsize structure from MCP scratchpad */
4116	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4117
4118	/* Find running bundle ID */
4119	running_mfw_addr =
4120		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4121		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4122	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4123	if (*running_bundle_id > 1)
4124		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4125
4126	/* Find image in NVRAM */
4127	nvram_image_type =
4128	    (*running_bundle_id ==
4129	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4130	return qed_find_nvram_image(p_hwfn,
4131				    p_ptt,
4132				    nvram_image_type,
4133				    trace_meta_offset,
4134				    trace_meta_size,
4135				    true);
4136}
4137
4138/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4139static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4140					       struct qed_ptt *p_ptt,
4141					       u32 nvram_offset_in_bytes,
4142					       u32 size_in_bytes, u32 *buf)
4143{
4144	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4145	enum dbg_status status;
4146	u32 signature;
4147
4148	/* Read meta data from NVRAM */
4149	status = qed_nvram_read(p_hwfn,
4150				p_ptt,
4151				nvram_offset_in_bytes,
4152				size_in_bytes,
4153				buf,
4154				true);
4155	if (status != DBG_STATUS_OK)
4156		return status;
4157
4158	/* Extract and check first signature */
4159	signature = qed_read_unaligned_dword(byte_buf);
4160	byte_buf += sizeof(signature);
4161	if (signature != NVM_MAGIC_VALUE)
4162		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4163
4164	/* Extract number of modules */
4165	modules_num = *(byte_buf++);
4166
4167	/* Skip all modules */
4168	for (i = 0; i < modules_num; i++) {
4169		module_len = *(byte_buf++);
4170		byte_buf += module_len;
4171	}
4172
4173	/* Extract and check second signature */
4174	signature = qed_read_unaligned_dword(byte_buf);
4175	byte_buf += sizeof(signature);
4176	if (signature != NVM_MAGIC_VALUE)
4177		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4178
4179	return DBG_STATUS_OK;
4180}
4181
4182/* Dump MCP Trace */
4183static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4184					  struct qed_ptt *p_ptt,
4185					  u32 *dump_buf,
4186					  bool dump, u32 *num_dumped_dwords)
4187{
4188	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4189	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4190	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4191	enum dbg_status status;
4192	int halted = 0;
4193	bool use_mfw;
4194
4195	*num_dumped_dwords = 0;
4196
4197	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4198
4199	/* Get trace data info */
4200	status = qed_mcp_trace_get_data_info(p_hwfn,
4201					     p_ptt,
4202					     &trace_data_grc_addr,
4203					     &trace_data_size_bytes);
4204	if (status != DBG_STATUS_OK)
4205		return status;
4206
4207	/* Dump global params */
4208	offset += qed_dump_common_global_params(p_hwfn,
4209						p_ptt,
4210						dump_buf + offset, dump, 1);
4211	offset += qed_dump_str_param(dump_buf + offset,
4212				     dump, "dump-type", "mcp-trace");
4213
4214	/* Halt the MCP while reading from the scratchpad so the read data is
4215	 * consistent. If the halt fails, the MCP trace is taken anyway, with
4216	 * a small risk that it may be corrupt.
4217	 */
4218	if (dump && use_mfw) {
4219		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4220		if (!halted)
4221			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4222	}
4223
4224	/* Find trace data size */
4225	trace_data_size_dwords =
4226	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4227			 BYTES_IN_DWORD);
4228
4229	/* Dump trace data section header and param */
4230	offset += qed_dump_section_hdr(dump_buf + offset,
4231				       dump, "mcp_trace_data", 1);
4232	offset += qed_dump_num_param(dump_buf + offset,
4233				     dump, "size", trace_data_size_dwords);
4234
4235	/* Read trace data from scratchpad into dump buffer */
4236	offset += qed_grc_dump_addr_range(p_hwfn,
4237					  p_ptt,
4238					  dump_buf + offset,
4239					  dump,
4240					  BYTES_TO_DWORDS(trace_data_grc_addr),
4241					  trace_data_size_dwords, false,
4242					  SPLIT_TYPE_NONE, 0);
4243
4244	/* Resume MCP (only if halt succeeded) */
4245	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4246		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4247
4248	/* Dump trace meta section header */
4249	offset += qed_dump_section_hdr(dump_buf + offset,
4250				       dump, "mcp_trace_meta", 1);
4251
4252	/* If MCP Trace meta size parameter was set, use it.
4253	 * Otherwise, read trace meta.
4254	 * trace_meta_size_bytes is dword-aligned.
4255	 */
4256	trace_meta_size_bytes =
4257		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4258	if ((!trace_meta_size_bytes || dump) && use_mfw)
4259		status = qed_mcp_trace_get_meta_info(p_hwfn,
4260						     p_ptt,
4261						     trace_data_size_bytes,
4262						     &running_bundle_id,
4263						     &trace_meta_offset_bytes,
4264						     &trace_meta_size_bytes);
4265	if (status == DBG_STATUS_OK)
4266		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4267
4268	/* Dump trace meta size param */
4269	offset += qed_dump_num_param(dump_buf + offset,
4270				     dump, "size", trace_meta_size_dwords);
4271
4272	/* Read trace meta image into dump buffer */
4273	if (dump && trace_meta_size_dwords)
4274		status = qed_mcp_trace_read_meta(p_hwfn,
4275						 p_ptt,
4276						 trace_meta_offset_bytes,
4277						 trace_meta_size_bytes,
4278						 dump_buf + offset);
4279	if (status == DBG_STATUS_OK)
4280		offset += trace_meta_size_dwords;
4281
4282	/* Dump last section */
4283	offset += qed_dump_last_section(dump_buf, offset, dump);
4284
4285	*num_dumped_dwords = offset;
4286
4287	/* If no mcp access, indicate that the dump doesn't contain the meta
4288	 * data from NVRAM.
4289	 */
4290	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4291}
4292
4293/* Dump GRC FIFO */
4294static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4295					 struct qed_ptt *p_ptt,
4296					 u32 *dump_buf,
4297					 bool dump, u32 *num_dumped_dwords)
4298{
4299	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4300	bool fifo_has_data;
4301
4302	*num_dumped_dwords = 0;
4303
4304	/* Dump global params */
4305	offset += qed_dump_common_global_params(p_hwfn,
4306						p_ptt,
4307						dump_buf + offset, dump, 1);
4308	offset += qed_dump_str_param(dump_buf + offset,
4309				     dump, "dump-type", "reg-fifo");
4310
4311	/* Dump fifo data section header and param. The size param is 0 for
4312	 * now, and is overwritten after reading the FIFO.
4313	 */
4314	offset += qed_dump_section_hdr(dump_buf + offset,
4315				       dump, "reg_fifo_data", 1);
4316	size_param_offset = offset;
4317	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4318
4319	if (!dump) {
4320		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4321		 * test how much data is available, except for reading it.
4322		 */
4323		offset += REG_FIFO_DEPTH_DWORDS;
4324		goto out;
4325	}
4326
4327	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4328			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4329
4330	/* Pull available data from fifo. Use DMAE since this is widebus memory
4331	 * and must be accessed atomically. Test for dwords_read not passing
4332	 * buffer size since more entries could be added to the buffer as we are
4333	 * emptying it.
4334	 */
4335	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4336	len = REG_FIFO_ELEMENT_DWORDS;
4337	for (dwords_read = 0;
4338	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4339	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4340		offset += qed_grc_dump_addr_range(p_hwfn,
4341						  p_ptt,
4342						  dump_buf + offset,
4343						  true,
4344						  addr,
4345						  len,
4346						  true, SPLIT_TYPE_NONE,
4347						  0);
4348		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4349				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4350	}
4351
4352	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4353			   dwords_read);
4354out:
4355	/* Dump last section */
4356	offset += qed_dump_last_section(dump_buf, offset, dump);
4357
4358	*num_dumped_dwords = offset;
4359
4360	return DBG_STATUS_OK;
4361}
4362
4363/* Dump IGU FIFO */
4364static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4365					 struct qed_ptt *p_ptt,
4366					 u32 *dump_buf,
4367					 bool dump, u32 *num_dumped_dwords)
4368{
4369	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4370	bool fifo_has_data;
4371
4372	*num_dumped_dwords = 0;
4373
4374	/* Dump global params */
4375	offset += qed_dump_common_global_params(p_hwfn,
4376						p_ptt,
4377						dump_buf + offset, dump, 1);
4378	offset += qed_dump_str_param(dump_buf + offset,
4379				     dump, "dump-type", "igu-fifo");
4380
4381	/* Dump fifo data section header and param. The size param is 0 for
4382	 * now, and is overwritten after reading the FIFO.
4383	 */
4384	offset += qed_dump_section_hdr(dump_buf + offset,
4385				       dump, "igu_fifo_data", 1);
4386	size_param_offset = offset;
4387	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4388
4389	if (!dump) {
4390		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4391		 * test how much data is available, except for reading it.
4392		 */
4393		offset += IGU_FIFO_DEPTH_DWORDS;
4394		goto out;
4395	}
4396
4397	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4398			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4399
4400	/* Pull available data from fifo. Use DMAE since this is widebus memory
4401	 * and must be accessed atomically. Test for dwords_read not passing
4402	 * buffer size since more entries could be added to the buffer as we are
4403	 * emptying it.
4404	 */
4405	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4406	len = IGU_FIFO_ELEMENT_DWORDS;
4407	for (dwords_read = 0;
4408	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4409	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4410		offset += qed_grc_dump_addr_range(p_hwfn,
4411						  p_ptt,
4412						  dump_buf + offset,
4413						  true,
4414						  addr,
4415						  len,
4416						  true, SPLIT_TYPE_NONE,
4417						  0);
4418		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4419				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4420	}
4421
4422	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4423			   dwords_read);
4424out:
4425	/* Dump last section */
4426	offset += qed_dump_last_section(dump_buf, offset, dump);
4427
4428	*num_dumped_dwords = offset;
4429
4430	return DBG_STATUS_OK;
4431}
4432
4433/* Protection Override dump */
4434static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4435						    struct qed_ptt *p_ptt,
4436						    u32 *dump_buf,
4437						    bool dump,
4438						    u32 *num_dumped_dwords)
4439{
4440	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4441
4442	*num_dumped_dwords = 0;
4443
4444	/* Dump global params */
4445	offset += qed_dump_common_global_params(p_hwfn,
4446						p_ptt,
4447						dump_buf + offset, dump, 1);
4448	offset += qed_dump_str_param(dump_buf + offset,
4449				     dump, "dump-type", "protection-override");
4450
4451	/* Dump data section header and param. The size param is 0 for now,
4452	 * and is overwritten after reading the data.
4453	 */
4454	offset += qed_dump_section_hdr(dump_buf + offset,
4455				       dump, "protection_override_data", 1);
4456	size_param_offset = offset;
4457	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4458
4459	if (!dump) {
4460		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4461		goto out;
4462	}
4463
4464	/* Add override window info to buffer */
4465	override_window_dwords =
4466		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4467		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4468	if (override_window_dwords) {
4469		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4470		offset += qed_grc_dump_addr_range(p_hwfn,
4471						  p_ptt,
4472						  dump_buf + offset,
4473						  true,
4474						  addr,
4475						  override_window_dwords,
4476						  true, SPLIT_TYPE_NONE, 0);
4477		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4478				   override_window_dwords);
4479	}
4480out:
4481	/* Dump last section */
4482	offset += qed_dump_last_section(dump_buf, offset, dump);
4483
4484	*num_dumped_dwords = offset;
4485
4486	return DBG_STATUS_OK;
4487}
4488
4489/* Performs FW Asserts Dump to the specified buffer.
4490 * Returns the dumped size in dwords.
4491 */
4492static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4493			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4494{
4495	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4496	struct fw_asserts_ram_section *asserts;
4497	char storm_letter_str[2] = "?";
4498	struct fw_info fw_info;
4499	u32 offset = 0;
4500	u8 storm_id;
4501
4502	/* Dump global params */
4503	offset += qed_dump_common_global_params(p_hwfn,
4504						p_ptt,
4505						dump_buf + offset, dump, 1);
4506	offset += qed_dump_str_param(dump_buf + offset,
4507				     dump, "dump-type", "fw-asserts");
4508
4509	/* Find Storm dump size */
4510	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4511		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4512		struct storm_defs *storm = &s_storm_defs[storm_id];
4513		u32 last_list_idx, addr;
4514
4515		if (dev_data->block_in_reset[storm->sem_block_id])
4516			continue;
4517
4518		/* Read FW info for the current Storm */
4519		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4520
4521		asserts = &fw_info.fw_asserts_section;
4522
4523		/* Dump FW Asserts section header and params */
4524		storm_letter_str[0] = storm->letter;
4525		offset += qed_dump_section_hdr(dump_buf + offset,
4526					       dump, "fw_asserts", 2);
4527		offset += qed_dump_str_param(dump_buf + offset,
4528					     dump, "storm", storm_letter_str);
4529		offset += qed_dump_num_param(dump_buf + offset,
4530					     dump,
4531					     "size",
4532					     asserts->list_element_dword_size);
4533
4534		/* Read and dump FW Asserts data */
4535		if (!dump) {
4536			offset += asserts->list_element_dword_size;
4537			continue;
4538		}
4539
4540		addr = le16_to_cpu(asserts->section_ram_line_offset);
4541		fw_asserts_section_addr = storm->sem_fast_mem_addr +
4542					  SEM_FAST_REG_INT_RAM +
4543					  RAM_LINES_TO_BYTES(addr);
4544
4545		next_list_idx_addr = fw_asserts_section_addr +
4546			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4547		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
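		/* Dump only the element just before next_list_idx, wrapping to
		 * the last element when next_list_idx is 0 (presumably the
		 * most recently written assert).
		 */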
4548		last_list_idx = (next_list_idx > 0 ?
4549				 next_list_idx :
4550				 asserts->list_num_elements) - 1;
4551		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4552		       asserts->list_dword_offset +
4553		       last_list_idx * asserts->list_element_dword_size;
4554		offset +=
4555		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4556					    dump_buf + offset,
4557					    dump, addr,
4558					    asserts->list_element_dword_size,
4559						  false, SPLIT_TYPE_NONE, 0);
4560	}
4561
4562	/* Dump last section */
4563	offset += qed_dump_last_section(dump_buf, offset, dump);
4564
4565	return offset;
4566}
4567
4568/* Dumps the specified ILT pages to the specified buffer.
4569 * Returns the dumped size in dwords.
4570 */
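/* Note that the returned offset advances as if everything was dumped, so a
 * size-estimation pass (*dump == false) still yields the full required size.
 * If the buffer fills up mid-dump, *dump is cleared and
 * *given_actual_dump_size_in_dwords records how much was actually written
 * (including a possible partial page).
 */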
4571static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
4572				    bool *dump, u32 start_page_id,
4573				    u32 num_pages,
4574				    struct phys_mem_desc *ilt_pages,
4575				    bool dump_page_ids, u32 buf_size_in_dwords,
4576				    u32 *given_actual_dump_size_in_dwords)
4577{
4578	u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
4579	u32 page_id, end_page_id, offset = *given_offset;
4580	struct phys_mem_desc *mem_desc = NULL;
4581	bool continue_dump = *dump;
4582	u32 partial_page_size = 0;
4583
4584	if (num_pages == 0)
4585		return offset;
4586
4587	end_page_id = start_page_id + num_pages - 1;
4588
4589	for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4590		mem_desc = &ilt_pages[page_id];
4591		if (!ilt_pages[page_id].virt_addr)
4592			continue;
4593
4594		if (dump_page_ids) {
4595			/* Copy page ID to dump buffer
4596			 * (if dump is needed and buffer is not full)
4597			 */
4598			if ((continue_dump) &&
4599			    (offset + 1 > buf_size_in_dwords)) {
4600				continue_dump = false;
4601				actual_dump_size_in_dwords = offset;
4602			}
4603			if (continue_dump)
4604				*(dump_buf + offset) = page_id;
4605			offset++;
4606		} else {
4607			/* Copy page memory to dump buffer */
4608			if ((continue_dump) &&
4609			    (offset + BYTES_TO_DWORDS(mem_desc->size) >
4610			     buf_size_in_dwords)) {
4611				if (offset + BYTES_TO_DWORDS(mem_desc->size) >
4612				    buf_size_in_dwords) {
4613					partial_page_size =
4614					    buf_size_in_dwords - offset;
4615					memcpy(dump_buf + offset,
4616					       mem_desc->virt_addr,
4617					       partial_page_size);
4618					continue_dump = false;
4619					actual_dump_size_in_dwords =
4620					    offset + partial_page_size;
4621				}
4622			}
4623
4624			if (continue_dump)
4625				memcpy(dump_buf + offset,
4626				       mem_desc->virt_addr, mem_desc->size);
4627			offset += BYTES_TO_DWORDS(mem_desc->size);
4628		}
4629	}
4630
4631	*dump = continue_dump;
4632	*given_offset = offset;
4633	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
4634
4635	return offset;
4636}
4637
4638/* Dumps a section containing the dumped ILT pages.
4639 * Returns the dumped size in dwords.
4640 */
4641static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
4642				      u32 *dump_buf,
4643				      u32 *given_offset,
4644				      bool *dump,
4645				      u32 valid_conn_pf_pages,
4646				      u32 valid_conn_vf_pages,
4647				      struct phys_mem_desc *ilt_pages,
4648				      bool dump_page_ids,
4649				      u32 buf_size_in_dwords,
4650				      u32 *given_actual_dump_size_in_dwords)
4651{
4652	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4653	u32 pf_start_line, start_page_id, offset = *given_offset;
4654	u32 cdut_pf_init_pages, cdut_vf_init_pages;
4655	u32 cdut_pf_work_pages, cdut_vf_work_pages;
4656	u32 base_data_offset, size_param_offset;
4657	u32 src_pages;
4658	u32 section_header_and_param_size;
4659	u32 cdut_pf_pages, cdut_vf_pages;
4660	u32 actual_dump_size_in_dwords;
4661	bool continue_dump = *dump;
4662	bool update_size = *dump;
4663	const char *section_name;
4664	u32 i;
4665
4666	actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
4667	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
4668	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
4669	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
4670	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
4671	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
4672	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
4673	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
4674	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
4675	section_header_and_param_size = qed_dump_section_hdr(NULL,
4676							     false,
4677							     section_name,
4678							     1) +
4679	qed_dump_num_param(NULL, false, "size", 0);
4680
4681	if ((continue_dump) &&
4682	    (offset + section_header_and_param_size > buf_size_in_dwords)) {
4683		continue_dump = false;
4684		update_size = false;
4685		actual_dump_size_in_dwords = offset;
4686	}
4687
4688	offset += qed_dump_section_hdr(dump_buf + offset,
4689				       continue_dump, section_name, 1);
4690
4691	/* Dump size parameter (0 for now, overwritten with real size later) */
4692	size_param_offset = offset;
4693	offset += qed_dump_num_param(dump_buf + offset,
4694				     continue_dump, "size", 0);
4695	base_data_offset = offset;
4696
4697	/* CDUC pages are ordered as follows:
4698	 * - PF pages - valid section (included in PF connection type mapping)
4699	 * - PF pages - invalid section (not dumped)
4700	 * - For each VF in the PF:
4701	 *   - VF pages - valid section (included in VF connection type mapping)
4702	 *   - VF pages - invalid section (not dumped)
4703	 */
4704	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
4705		/* Dump connection PF pages */
4706		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
4707		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
4708					 start_page_id, valid_conn_pf_pages,
4709					 ilt_pages, dump_page_ids,
4710					 buf_size_in_dwords,
4711					 &actual_dump_size_in_dwords);
4712
4713		/* Dump connection VF pages */
4714		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
4715		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4716		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
4717			qed_ilt_dump_pages_range(dump_buf, &offset,
4718						 &continue_dump, start_page_id,
4719						 valid_conn_vf_pages,
4720						 ilt_pages, dump_page_ids,
4721						 buf_size_in_dwords,
4722						 &actual_dump_size_in_dwords);
4723	}
4724
4725	/* CDUT pages are ordered as follows:
4726	 * - PF init pages (not dumped)
4727	 * - PF work pages
4728	 * - For each VF in the PF:
4729	 *   - VF init pages (not dumped)
4730	 *   - VF work pages
4731	 */
4732	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
4733		/* Dump task PF pages */
4734		start_page_id = clients[ILT_CLI_CDUT].first.val +
4735		    cdut_pf_init_pages - pf_start_line;
4736		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
4737					 start_page_id, cdut_pf_work_pages,
4738					 ilt_pages, dump_page_ids,
4739					 buf_size_in_dwords,
4740					 &actual_dump_size_in_dwords);
4741
4742		/* Dump task VF pages */
4743		start_page_id = clients[ILT_CLI_CDUT].first.val +
4744		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
4745		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4746		     i++, start_page_id += cdut_vf_pages)
4747			qed_ilt_dump_pages_range(dump_buf, &offset,
4748						 &continue_dump, start_page_id,
4749						 cdut_vf_work_pages, ilt_pages,
4750						 dump_page_ids,
4751						 buf_size_in_dwords,
4752						 &actual_dump_size_in_dwords);
4753	}
4754
4755	/* Dump Searcher pages */
4756	if (clients[ILT_CLI_SRC].active) {
4757		start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
4758		src_pages = clients[ILT_CLI_SRC].last.val -
4759		    clients[ILT_CLI_SRC].first.val + 1;
4760		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
4761					 start_page_id, src_pages, ilt_pages,
4762					 dump_page_ids, buf_size_in_dwords,
4763					 &actual_dump_size_in_dwords);
4764	}
4765
4766	/* Overwrite size param */
4767	if (update_size) {
4768		u32 section_size = (*dump == continue_dump) ?
4769		    offset - base_data_offset :
4770		    actual_dump_size_in_dwords - base_data_offset;
4771		if (section_size > 0)
4772			qed_dump_num_param(dump_buf + size_param_offset,
4773					   *dump, "size", section_size);
4774		else if ((section_size == 0) && (*dump != continue_dump))
4775			actual_dump_size_in_dwords -=
4776			    section_header_and_param_size;
4777	}
4778
4779	*dump = continue_dump;
4780	*given_offset = offset;
4781	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
4782
4783	return offset;
4784}
4785
4786/* Dumps a section containing the global parameters.
4787 * Part of the ILT dump process.
4788 * Returns the dumped size in dwords.
4789 */
4790static u32
4791qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
4792				       struct qed_ptt *p_ptt,
4793				       u32 *dump_buf,
4794				       bool dump,
4795				       u32 cduc_page_size,
4796				       u32 conn_ctx_size,
4797				       u32 cdut_page_size,
4798				       u32 *full_dump_size_param_offset,
4799				       u32 *actual_dump_size_param_offset)
4800{
4801	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4802	u32 offset = 0;
4803
4804	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
4805						dump_buf + offset,
4806						dump, 30);
4807	offset += qed_dump_str_param(dump_buf + offset,
4808				     dump,
4809				     "dump-type", "ilt-dump");
4810	offset += qed_dump_num_param(dump_buf + offset,
4811				     dump,
4812				     "cduc-page-size",
4813				     cduc_page_size);
4814	offset += qed_dump_num_param(dump_buf + offset,
4815				     dump,
4816				     "cduc-first-page-id",
4817				     clients[ILT_CLI_CDUC].first.val);
4818	offset += qed_dump_num_param(dump_buf + offset,
4819				     dump,
4820				     "cduc-last-page-id",
4821				     clients[ILT_CLI_CDUC].last.val);
4822	offset += qed_dump_num_param(dump_buf + offset,
4823				     dump,
4824				     "cduc-num-pf-pages",
4825				     clients[ILT_CLI_CDUC].pf_total_lines);
4826	offset += qed_dump_num_param(dump_buf + offset,
4827				     dump,
4828				     "cduc-num-vf-pages",
4829				     clients[ILT_CLI_CDUC].vf_total_lines);
4830	offset += qed_dump_num_param(dump_buf + offset,
4831				     dump,
4832				     "max-conn-ctx-size",
4833				     conn_ctx_size);
4834	offset += qed_dump_num_param(dump_buf + offset,
4835				     dump,
4836				     "cdut-page-size",
4837				     cdut_page_size);
4838	offset += qed_dump_num_param(dump_buf + offset,
4839				     dump,
4840				     "cdut-first-page-id",
4841				     clients[ILT_CLI_CDUT].first.val);
4842	offset += qed_dump_num_param(dump_buf + offset,
4843				     dump,
4844				     "cdut-last-page-id",
4845				     clients[ILT_CLI_CDUT].last.val);
4846	offset += qed_dump_num_param(dump_buf + offset,
4847				     dump,
4848				     "cdut-num-pf-init-pages",
4849				     qed_get_cdut_num_pf_init_pages(p_hwfn));
4850	offset += qed_dump_num_param(dump_buf + offset,
4851				     dump,
4852				     "cdut-num-vf-init-pages",
4853				     qed_get_cdut_num_vf_init_pages(p_hwfn));
4854	offset += qed_dump_num_param(dump_buf + offset,
4855				     dump,
4856				     "cdut-num-pf-work-pages",
4857				     qed_get_cdut_num_pf_work_pages(p_hwfn));
4858	offset += qed_dump_num_param(dump_buf + offset,
4859				     dump,
4860				     "cdut-num-vf-work-pages",
4861				     qed_get_cdut_num_vf_work_pages(p_hwfn));
4862	offset += qed_dump_num_param(dump_buf + offset,
4863				     dump,
4864				     "max-task-ctx-size",
4865				     p_hwfn->p_cxt_mngr->task_ctx_size);
4866	offset += qed_dump_num_param(dump_buf + offset,
4867				     dump,
4868				     "first-vf-id-in-pf",
4869				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
4870	offset += qed_dump_num_param(dump_buf + offset,
4871				     dump,
4872				     "num-vfs-in-pf",
4873				     p_hwfn->p_cxt_mngr->vf_count);
4874	offset += qed_dump_num_param(dump_buf + offset,
4875				     dump,
4876				     "ptr-size-bytes",
4877				     sizeof(void *));
4878	offset += qed_dump_num_param(dump_buf + offset,
4879				     dump,
4880				     "pf-start-line",
4881				     p_hwfn->p_cxt_mngr->pf_start_line);
4882	offset += qed_dump_num_param(dump_buf + offset,
4883				     dump,
4884				     "page-mem-desc-size-dwords",
4885				     PAGE_MEM_DESC_SIZE_DWORDS);
4886	offset += qed_dump_num_param(dump_buf + offset,
4887				     dump,
4888				     "ilt-shadow-size",
4889				     p_hwfn->p_cxt_mngr->ilt_shadow_size);
4890
4891	*full_dump_size_param_offset = offset;
4892
4893	offset += qed_dump_num_param(dump_buf + offset,
4894				     dump, "dump-size-full", 0);
4895
4896	*actual_dump_size_param_offset = offset;
4897
4898	offset += qed_dump_num_param(dump_buf + offset,
4899				     dump,
4900				     "dump-size-actual", 0);
4901	offset += qed_dump_num_param(dump_buf + offset,
4902				     dump,
4903				     "iscsi_task_pages",
4904				     p_hwfn->p_cxt_mngr->iscsi_task_pages);
4905	offset += qed_dump_num_param(dump_buf + offset,
4906				     dump,
4907				     "fcoe_task_pages",
4908				     p_hwfn->p_cxt_mngr->fcoe_task_pages);
4909	offset += qed_dump_num_param(dump_buf + offset,
4910				     dump,
4911				     "roce_task_pages",
4912				     p_hwfn->p_cxt_mngr->roce_task_pages);
4913	offset += qed_dump_num_param(dump_buf + offset,
4914				     dump,
4915				     "eth_task_pages",
4916				     p_hwfn->p_cxt_mngr->eth_task_pages);
4917	offset += qed_dump_num_param(dump_buf + offset,
4918				      dump,
4919				      "src-first-page-id",
4920				      clients[ILT_CLI_SRC].first.val);
4921	offset += qed_dump_num_param(dump_buf + offset,
4922				     dump,
4923				     "src-last-page-id",
4924				     clients[ILT_CLI_SRC].last.val);
4925	offset += qed_dump_num_param(dump_buf + offset,
4926				     dump,
4927				     "src-is-active",
4928				     clients[ILT_CLI_SRC].active);
4929
4930	/* Adding or removing parameters here requires updating the parameter
4931	 * count passed to qed_dump_common_global_params() above.
4932	 */
4933
4934	return offset;
4935}
4936
4937/* Dump section containing number of PF CIDs per connection type.
4938 * Part of ilt dump process.
4939 * Returns the dumped size in dwords.
4940 */
4941static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
4942					 u32 *dump_buf,
4943					 bool dump, u32 *valid_conn_pf_cids)
4944{
4945	u32 num_pf_cids = 0;
4946	u32 offset = 0;
4947	u8 conn_type;
4948
4949	offset += qed_dump_section_hdr(dump_buf + offset,
4950				       dump, "num_pf_cids_per_conn_type", 1);
4951	offset += qed_dump_num_param(dump_buf + offset,
4952				     dump, "size", NUM_OF_CONNECTION_TYPES);
4953	for (conn_type = 0, *valid_conn_pf_cids = 0;
4954	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4955		num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
4956		if (dump)
4957			*(dump_buf + offset) = num_pf_cids;
4958		*valid_conn_pf_cids += num_pf_cids;
4959	}
4960
4961	return offset;
4962}
4963
4964/* Dump section containing number of VF CIDs per connection type
4965 * Part of ilt dump process.
4966 * Returns the dumped size in dwords.
4967 */
4968static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
4969					 u32 *dump_buf,
4970					 bool dump, u32 *valid_conn_vf_cids)
4971{
4972	u32 num_vf_cids = 0;
4973	u32 offset = 0;
4974	u8 conn_type;
4975
4976	offset += qed_dump_section_hdr(dump_buf + offset, dump,
4977				       "num_vf_cids_per_conn_type", 1);
4978	offset += qed_dump_num_param(dump_buf + offset,
4979				     dump, "size", NUM_OF_CONNECTION_TYPES);
4980	for (conn_type = 0, *valid_conn_vf_cids = 0;
4981	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4982		num_vf_cids =
4983		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
4984		if (dump)
4985			*(dump_buf + offset) = num_vf_cids;
4986		*valid_conn_vf_cids += num_vf_cids;
4987	}
4988
4989	return offset;
4990}
4991
4992/* Performs ILT Dump to the specified buffer.
4993 * buf_size_in_dwords - the size of the dump buffer, in dwords.
4994 * Returns the dumped size in dwords.
4995 */
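/* Unlike the other dump types in this file, the ILT dump is bounded by the
 * caller-supplied buf_size_in_dwords: each section is size-checked before it
 * is written, the dump is truncated gracefully when the buffer runs out, and
 * separate full-size and actual-size params are reported so the parser knows
 * how much data is really present.
 */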
4996static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
4997			struct qed_ptt *p_ptt,
4998			u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
4999{
5000#if ((!defined VMWARE) && (!defined UEFI))
5001	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
5002#endif
5003	u32 valid_conn_vf_cids = 0,
5004	    valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
5005	u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
5006	u32 num_cids_per_page, conn_ctx_size;
5007	u32 cduc_page_size, cdut_page_size;
5008	u32 actual_dump_size_in_dwords = 0;
5009	struct phys_mem_desc *ilt_pages;
5010	u32 actual_dump_off = 0;
5011	u32 last_section_size;
5012	u32 full_dump_off = 0;
5013	u32 section_size = 0;
5014	bool continue_dump;
5015	u32 page_id;
5016
5017	last_section_size = qed_dump_last_section(NULL, 0, false);
5018	cduc_page_size = 1 <<
5019	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
5020	cdut_page_size = 1 <<
5021	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
5022	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
5023	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
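	/* Each CDUC page holds num_cids_per_page connection contexts; this is
	 * used below to convert the valid CID counts into page counts.
	 */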
5024	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
5025	continue_dump = dump;
5026
5027	/* If a dump is needed, reserve room for the last section
5028	 * (the last section holds the CRC of the dumped data).
5029	 */
5030	if (dump) {
5031		if (buf_size_in_dwords >= last_section_size) {
5032			buf_size_in_dwords -= last_section_size;
5033		} else {
5034			continue_dump = false;
5035			actual_dump_size_in_dwords = offset;
5036		}
5037	}
5038
5039	/* Dump global params */
5040
5041	/* If a dump is needed, first calculate the size of this section
5042	 * without dumping, and check that the dump buffer has enough room
5043	 * for it. If there is not enough memory, stop the dump.
5044	 */
5046	if (continue_dump) {
5047		section_size =
5048			qed_ilt_dump_dump_common_global_params(p_hwfn,
5049							       p_ptt,
5050							       NULL,
5051							       false,
5052							       cduc_page_size,
5053							       conn_ctx_size,
5054							       cdut_page_size,
5055							       &full_dump_off,
5056							       &actual_dump_off);
5057		if (offset + section_size > buf_size_in_dwords) {
5058			continue_dump = false;
5059			actual_dump_size_in_dwords = offset;
5060		}
5061	}
5062
5063	offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
5064							 p_ptt,
5065							 dump_buf + offset,
5066							 continue_dump,
5067							 cduc_page_size,
5068							 conn_ctx_size,
5069							 cdut_page_size,
5070							 &full_dump_off,
5071							 &actual_dump_off);
5072
5073	/* Dump the section containing the number of PF CIDs per connection
5074	 * type. If a dump is needed, first check that the dump buffer has
5075	 * enough room for this section.
5076	 */
5077	if (continue_dump) {
5078		section_size =
5079			qed_ilt_dump_dump_num_pf_cids(p_hwfn,
5080						      NULL,
5081						      false,
5082						      &valid_conn_pf_cids);
5083		if (offset + section_size > buf_size_in_dwords) {
5084			continue_dump = false;
5085			actual_dump_size_in_dwords = offset;
5086		}
5087	}
5088
5089	offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
5090						dump_buf + offset,
5091						continue_dump,
5092						&valid_conn_pf_cids);
5093
5094	/* Dump the section containing the number of VF CIDs per connection
5095	 * type. If a dump is needed, first check that the dump buffer has
5096	 * enough room for this section.
5097	 */
5098	if (continue_dump) {
5099		section_size =
5100			qed_ilt_dump_dump_num_vf_cids(p_hwfn,
5101						      NULL,
5102						      false,
5103						      &valid_conn_vf_cids);
5104		if (offset + section_size > buf_size_in_dwords) {
5105			continue_dump = false;
5106			actual_dump_size_in_dwords = offset;
5107		}
5108	}
5109
5110	offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
5111						dump_buf + offset,
5112						continue_dump,
5113						&valid_conn_vf_cids);
5114
5115	/* Dump section containing physical memory descriptors for each
5116	 * ILT page.
5117	 */
5118	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
5119
5120	/* If a dump is needed, first check that the dump buffer has enough
5121	 * room for the section header.
5122	 */
5123	if (continue_dump) {
5124		section_size = qed_dump_section_hdr(NULL,
5125						    false,
5126						    "ilt_page_desc",
5127						    1) +
5128		    qed_dump_num_param(NULL,
5129				       false,
5130				       "size",
5131				       num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
5132		if (offset + section_size > buf_size_in_dwords) {
5133			continue_dump = false;
5134			actual_dump_size_in_dwords = offset;
5135		}
5136	}
5137
5138	offset += qed_dump_section_hdr(dump_buf + offset,
5139				       continue_dump, "ilt_page_desc", 1);
5140	offset += qed_dump_num_param(dump_buf + offset,
5141				     continue_dump,
5142				     "size",
5143				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
5144
5145	/* Copy the memory descriptors to the dump buffer. If a dump is
5146	 * needed, copy only as much as fits in the dump buffer.
5147	 */
5148	if (continue_dump) {
5149		for (page_id = 0; page_id < num_pages;
5150		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
5151			if (continue_dump &&
5152			    (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
5153			     buf_size_in_dwords)) {
5154				memcpy(dump_buf + offset,
5155				       &ilt_pages[page_id],
5156				       DWORDS_TO_BYTES
5157				       (PAGE_MEM_DESC_SIZE_DWORDS));
5158			} else {
5159				if (continue_dump) {
5160					continue_dump = false;
5161					actual_dump_size_in_dwords = offset;
5162				}
5163			}
5164		}
5165	} else {
5166		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
5167	}
5168
5169	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
5170					   num_cids_per_page);
5171	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
5172					   num_cids_per_page);
5173
5174	/* Dump ILT pages IDs */
5175	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
5176				   valid_conn_pf_pages, valid_conn_vf_pages,
5177				   ilt_pages, true, buf_size_in_dwords,
5178				   &actual_dump_size_in_dwords);
5179
5180	/* Dump ILT pages memory */
5181	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
5182				   valid_conn_pf_pages, valid_conn_vf_pages,
5183				   ilt_pages, false, buf_size_in_dwords,
5184				   &actual_dump_size_in_dwords);
5185
5186	real_dumped_size =
5187	    (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
5188	qed_dump_num_param(dump_buf + full_dump_off, dump,
5189			   "full-dump-size", offset + last_section_size);
5190	qed_dump_num_param(dump_buf + actual_dump_off,
5191			   dump,
5192			   "actual-dump-size",
5193			   real_dumped_size + last_section_size);
5194
5195	/* Dump last section */
5196	real_dumped_size += qed_dump_last_section(dump_buf,
5197						  real_dumped_size, dump);
5198
5199	return real_dumped_size;
5200}
5201
5202/***************************** Public Functions *******************************/
5203
5204enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
5205				    const u8 * const bin_ptr)
5206{
5207	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
5208	u8 buf_id;
5209
5210	/* Convert binary data to debug arrays */
5211	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
5212		qed_set_dbg_bin_buf(p_hwfn,
5213				    buf_id,
5214				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
5215				    buf_hdrs[buf_id].length);
5216
5217	return DBG_STATUS_OK;
5218}
5219
5220static enum dbg_status qed_dbg_set_app_ver(u32 ver)
5221{
5222	if (ver < TOOLS_VERSION)
5223		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
5224
5225	s_app_ver = ver;
5226
5227	return DBG_STATUS_OK;
5228}
5229
5230bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5231		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5232{
5233	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5234	u8 storm_id;
5235
5236	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5237		struct storm_defs *storm = &s_storm_defs[storm_id];
5238
5239		/* Skip Storm if it's in reset */
5240		if (dev_data->block_in_reset[storm->sem_block_id])
5241			continue;
5242
5243		/* Read FW info for the current Storm */
5244		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5245
5246		return true;
5247	}
5248
5249	return false;
5250}
5251
5252enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
5253				   enum dbg_grc_params grc_param, u32 val)
5254{
5255	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5256	enum dbg_status status;
5257	int i;
5258
5259	DP_VERBOSE(p_hwfn,
5260		   QED_MSG_DEBUG,
5261		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5262
5263	status = qed_dbg_dev_init(p_hwfn);
5264	if (status != DBG_STATUS_OK)
5265		return status;
5266
5267	/* Initialize the GRC parameters if they haven't been initialized yet.
5268	 * Needed to set the default parameter values on the first call.
5269	 */
5270	qed_dbg_grc_init_params(p_hwfn);
5271
5272	if (grc_param >= MAX_DBG_GRC_PARAMS)
5273		return DBG_STATUS_INVALID_ARGS;
5274	if (val < s_grc_param_defs[grc_param].min ||
5275	    val > s_grc_param_defs[grc_param].max)
5276		return DBG_STATUS_INVALID_ARGS;
5277
5278	if (s_grc_param_defs[grc_param].is_preset) {
5279		/* Preset param */
5280
5281		/* Disabling a preset is not allowed. Call
5282		 * dbg_grc_set_params_default instead.
5283		 */
5284		if (!val)
5285			return DBG_STATUS_INVALID_ARGS;
5286
5287		/* Update all params with the preset values */
5288		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5289			struct grc_param_defs *defs = &s_grc_param_defs[i];
5290			u32 preset_val;
5291			/* Skip persistent params */
5292			if (defs->is_persistent)
5293				continue;
5294
5295			/* Find preset value */
5296			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5297				preset_val =
5298				    defs->exclude_all_preset_val;
5299			else if (grc_param == DBG_GRC_PARAM_CRASH)
5300				preset_val =
5301				    defs->crash_preset_val[dev_data->chip_id];
5302			else
5303				return DBG_STATUS_INVALID_ARGS;
5304
5305			qed_grc_set_param(p_hwfn, i, preset_val);
5306		}
5307	} else {
5308		/* Regular param - set its value */
5309		qed_grc_set_param(p_hwfn, grc_param, val);
5310	}
5311
5312	return DBG_STATUS_OK;
5313}
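
/* Usage sketch (illustrative only): DBG_GRC_PARAM_CRASH is a preset
 * parameter, so passing any non-zero value applies the crash preset to all
 * non-persistent GRC params (a zero value is rejected above):
 *
 *	status = qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_CRASH, 1);
 */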
5314
5315/* Assign default GRC param values */
5316void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5317{
5318	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5319	u32 i;
5320
5321	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5322		if (!s_grc_param_defs[i].is_persistent)
5323			dev_data->grc.param_val[i] =
5324			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5325}
5326
5327enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5328					      struct qed_ptt *p_ptt,
5329					      u32 *buf_size)
5330{
5331	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5332
5333	*buf_size = 0;
5334
5335	if (status != DBG_STATUS_OK)
5336		return status;
5337
5338	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5339	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5340	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5341	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5342	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5343		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5344
5345	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5346}
5347
5348enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5349				 struct qed_ptt *p_ptt,
5350				 u32 *dump_buf,
5351				 u32 buf_size_in_dwords,
5352				 u32 *num_dumped_dwords)
5353{
5354	u32 needed_buf_size_in_dwords;
5355	enum dbg_status status;
5356
5357	*num_dumped_dwords = 0;
5358
5359	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5360					       p_ptt,
5361					       &needed_buf_size_in_dwords);
5362	if (status != DBG_STATUS_OK)
5363		return status;
5364
5365	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5366		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5367
5368	/* Doesn't do anything; needed for compile-time asserts */
5369	qed_static_asserts();
5370
5371	/* GRC Dump */
5372	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5373
5374	/* Revert GRC params to their default */
5375	qed_dbg_grc_set_params_default(p_hwfn);
5376
5377	return status;
5378}
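
/* Usage sketch (illustrative only) of the size-then-dump pattern shared by
 * the qed_dbg_*_dump() entry points; assumes the caller holds a valid
 * p_hwfn/p_ptt pair:
 *
 *	u32 size_in_dwords, dumped_dwords;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_in_dwords) !=
 *	    DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size_in_dwords * sizeof(u32));
 *	if (buf) {
 *		qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_in_dwords,
 *				 &dumped_dwords);
 *		vfree(buf);
 *	}
 */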
5379
5380enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5381						   struct qed_ptt *p_ptt,
5382						   u32 *buf_size)
5383{
5384	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5385	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
5386	enum dbg_status status;
5387
5388	*buf_size = 0;
5389
5390	status = qed_dbg_dev_init(p_hwfn);
5391	if (status != DBG_STATUS_OK)
5392		return status;
5393
5394	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5395	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5396	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5397	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5398		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5399
5400	if (!idle_chk->buf_size_set) {
5401		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5402						       p_ptt, NULL, false);
5403		idle_chk->buf_size_set = true;
5404	}
5405
5406	*buf_size = idle_chk->buf_size;
5407
5408	return DBG_STATUS_OK;
5409}
5410
5411enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5412				      struct qed_ptt *p_ptt,
5413				      u32 *dump_buf,
5414				      u32 buf_size_in_dwords,
5415				      u32 *num_dumped_dwords)
5416{
5417	u32 needed_buf_size_in_dwords;
5418	enum dbg_status status;
5419
5420	*num_dumped_dwords = 0;
5421
5422	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5423						    p_ptt,
5424						    &needed_buf_size_in_dwords);
5425	if (status != DBG_STATUS_OK)
5426		return status;
5427
5428	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5429		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5430
5431	/* Update reset state */
5432	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5433	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5434
5435	/* Idle Check Dump */
5436	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5437
5438	/* Revert GRC params to their default */
5439	qed_dbg_grc_set_params_default(p_hwfn);
5440
5441	return DBG_STATUS_OK;
5442}
5443
5444enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5445						    struct qed_ptt *p_ptt,
5446						    u32 *buf_size)
5447{
5448	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5449
5450	*buf_size = 0;
5451
5452	if (status != DBG_STATUS_OK)
5453		return status;
5454
5455	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5456}
5457
5458enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5459				       struct qed_ptt *p_ptt,
5460				       u32 *dump_buf,
5461				       u32 buf_size_in_dwords,
5462				       u32 *num_dumped_dwords)
5463{
5464	u32 needed_buf_size_in_dwords;
5465	enum dbg_status status;
5466
5467	status =
5468		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5469						    p_ptt,
5470						    &needed_buf_size_in_dwords);
5471	if (status != DBG_STATUS_OK && status !=
5472	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5473		return status;
5474
5475	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5476		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5477
5478	/* Update reset state */
5479	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5480
5481	/* Perform dump */
5482	status = qed_mcp_trace_dump(p_hwfn,
5483				    p_ptt, dump_buf, true, num_dumped_dwords);
5484
5485	/* Revert GRC params to their default */
5486	qed_dbg_grc_set_params_default(p_hwfn);
5487
5488	return status;
5489}
5490
5491enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5492						   struct qed_ptt *p_ptt,
5493						   u32 *buf_size)
5494{
5495	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5496
5497	*buf_size = 0;
5498
5499	if (status != DBG_STATUS_OK)
5500		return status;
5501
5502	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5503}
5504
5505enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5506				      struct qed_ptt *p_ptt,
5507				      u32 *dump_buf,
5508				      u32 buf_size_in_dwords,
5509				      u32 *num_dumped_dwords)
5510{
5511	u32 needed_buf_size_in_dwords;
5512	enum dbg_status status;
5513
5514	*num_dumped_dwords = 0;
5515
5516	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5517						    p_ptt,
5518						    &needed_buf_size_in_dwords);
5519	if (status != DBG_STATUS_OK)
5520		return status;
5521
5522	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5523		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5524
5525	/* Update reset state */
5526	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5527
5528	status = qed_reg_fifo_dump(p_hwfn,
5529				   p_ptt, dump_buf, true, num_dumped_dwords);
5530
5531	/* Revert GRC params to their default */
5532	qed_dbg_grc_set_params_default(p_hwfn);
5533
5534	return status;
5535}
5536
5537enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5538						   struct qed_ptt *p_ptt,
5539						   u32 *buf_size)
5540{
5541	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5542
5543	*buf_size = 0;
5544
5545	if (status != DBG_STATUS_OK)
5546		return status;
5547
5548	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5549}
5550
5551enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5552				      struct qed_ptt *p_ptt,
5553				      u32 *dump_buf,
5554				      u32 buf_size_in_dwords,
5555				      u32 *num_dumped_dwords)
5556{
5557	u32 needed_buf_size_in_dwords;
5558	enum dbg_status status;
5559
5560	*num_dumped_dwords = 0;
5561
5562	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5563						    p_ptt,
5564						    &needed_buf_size_in_dwords);
5565	if (status != DBG_STATUS_OK)
5566		return status;
5567
5568	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5569		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5570
5571	/* Update reset state */
5572	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5573
5574	status = qed_igu_fifo_dump(p_hwfn,
5575				   p_ptt, dump_buf, true, num_dumped_dwords);
5576	/* Revert GRC params to their default */
5577	qed_dbg_grc_set_params_default(p_hwfn);
5578
5579	return status;
5580}
5581
5582enum dbg_status
5583qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5584					      struct qed_ptt *p_ptt,
5585					      u32 *buf_size)
5586{
5587	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5588
5589	*buf_size = 0;
5590
5591	if (status != DBG_STATUS_OK)
5592		return status;
5593
5594	return qed_protection_override_dump(p_hwfn,
5595					    p_ptt, NULL, false, buf_size);
5596}
5597
5598enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5599						 struct qed_ptt *p_ptt,
5600						 u32 *dump_buf,
5601						 u32 buf_size_in_dwords,
5602						 u32 *num_dumped_dwords)
5603{
5604	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5605	enum dbg_status status;
5606
5607	*num_dumped_dwords = 0;
5608
5609	status =
5610		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5611							      p_ptt,
5612							      p_size);
5613	if (status != DBG_STATUS_OK)
5614		return status;
5615
5616	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5617		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5618
5619	/* Update reset state */
5620	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5621
5622	status = qed_protection_override_dump(p_hwfn,
5623					      p_ptt,
5624					      dump_buf,
5625					      true, num_dumped_dwords);
5626
5627	/* Revert GRC params to their default */
5628	qed_dbg_grc_set_params_default(p_hwfn);
5629
5630	return status;
5631}
5632
5633enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5634						     struct qed_ptt *p_ptt,
5635						     u32 *buf_size)
5636{
5637	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5638
5639	*buf_size = 0;
5640
5641	if (status != DBG_STATUS_OK)
5642		return status;
5643
5644	/* Update reset state */
5645	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5646
5647	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5648
5649	return DBG_STATUS_OK;
5650}
5651
5652enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5653					struct qed_ptt *p_ptt,
5654					u32 *dump_buf,
5655					u32 buf_size_in_dwords,
5656					u32 *num_dumped_dwords)
5657{
5658	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5659	enum dbg_status status;
5660
5661	*num_dumped_dwords = 0;
5662
5663	status =
5664		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5665						     p_ptt,
5666						     p_size);
5667	if (status != DBG_STATUS_OK)
5668		return status;
5669
5670	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5671		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5672
5673	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5674
5675	/* Revert GRC params to their default */
5676	qed_dbg_grc_set_params_default(p_hwfn);
5677
5678	return DBG_STATUS_OK;
5679}
5680
5681static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5682						     struct qed_ptt *p_ptt,
5683						     u32 *buf_size)
5684{
5685	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5686
5687	*buf_size = 0;
5688
5689	if (status != DBG_STATUS_OK)
5690		return status;
5691
5692	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
5693
5694	return DBG_STATUS_OK;
5695}
5696
5697static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
5698					struct qed_ptt *p_ptt,
5699					u32 *dump_buf,
5700					u32 buf_size_in_dwords,
5701					u32 *num_dumped_dwords)
5702{
5703	*num_dumped_dwords = qed_ilt_dump(p_hwfn,
5704					  p_ptt,
5705					  dump_buf, buf_size_in_dwords, true);
5706
5707	/* Revert GRC params to their default */
5708	qed_dbg_grc_set_params_default(p_hwfn);
5709
5710	return DBG_STATUS_OK;
5711}
5712
5713enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5714				  struct qed_ptt *p_ptt,
5715				  enum block_id block_id,
5716				  enum dbg_attn_type attn_type,
5717				  bool clear_status,
5718				  struct dbg_attn_block_result *results)
5719{
5720	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5721	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5722	const struct dbg_attn_reg *attn_reg_arr;
5723
5724	if (status != DBG_STATUS_OK)
5725		return status;
5726
5727	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5728	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5729	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5730		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5731
5732	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
5733					       block_id,
5734					       attn_type, &num_attn_regs);
5735
5736	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5737		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5738		struct dbg_attn_reg_result *reg_result;
5739		u32 sts_addr, sts_val;
5740		u16 modes_buf_offset;
5741		bool eval_mode;
5742
5743		/* Check mode */
5744		eval_mode = GET_FIELD(reg_data->mode.data,
5745				      DBG_MODE_HDR_EVAL_MODE) > 0;
5746		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5747					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5748		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5749			continue;
5750
5751		/* Mode match - read attention status register */
5752		sts_addr = DWORDS_TO_BYTES(clear_status ?
5753					   reg_data->sts_clr_address :
5754					   GET_FIELD(reg_data->data,
5755						     DBG_ATTN_REG_STS_ADDRESS));
5756		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5757		if (!sts_val)
5758			continue;
5759
5760		/* Non-zero attention status - add to results */
5761		reg_result = &results->reg_results[num_result_regs];
5762		SET_FIELD(reg_result->data,
5763			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5764		SET_FIELD(reg_result->data,
5765			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5766			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5767		reg_result->block_attn_offset = reg_data->block_attn_offset;
5768		reg_result->sts_val = sts_val;
5769		reg_result->mask_val = qed_rd(p_hwfn,
5770					      p_ptt,
5771					      DWORDS_TO_BYTES
5772					      (reg_data->mask_address));
5773		num_result_regs++;
5774	}
5775
5776	results->block_id = (u8)block_id;
5777	results->names_offset =
5778	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
5779	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5780	SET_FIELD(results->data,
5781		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5782
5783	return DBG_STATUS_OK;
5784}
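
/* Usage sketch (illustrative only); block_id and attn_type are assumed to be
 * valid enum block_id / enum dbg_attn_type values chosen by the caller:
 *
 *	struct dbg_attn_block_result res;
 *	u8 num_regs;
 *
 *	if (qed_dbg_read_attn(p_hwfn, p_ptt, block_id, attn_type, false,
 *			      &res) == DBG_STATUS_OK)
 *		num_regs = GET_FIELD(res.data,
 *				     DBG_ATTN_BLOCK_RESULT_NUM_REGS);
 */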
5785
5786/******************************* Data Types **********************************/
5787
5788/* REG fifo element */
5789struct reg_fifo_element {
5790	u64 data;
5791#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5792#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5793#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5794#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5795#define REG_FIFO_ELEMENT_PF_SHIFT		24
5796#define REG_FIFO_ELEMENT_PF_MASK		0xf
5797#define REG_FIFO_ELEMENT_VF_SHIFT		28
5798#define REG_FIFO_ELEMENT_VF_MASK		0xff
5799#define REG_FIFO_ELEMENT_PORT_SHIFT		36
5800#define REG_FIFO_ELEMENT_PORT_MASK		0x3
5801#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5802#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5803#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5804#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5805#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5806#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5807#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5808#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5809};
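
/* Fields of the 64-bit element above are extracted with GET_FIELD(), e.g.
 * (illustrative only, mirroring the reg FIFO parsing code further below):
 *
 *	vf_val   = GET_FIELD(elem.data, REG_FIFO_ELEMENT_VF);
 *	err_code = GET_FIELD(elem.data, REG_FIFO_ELEMENT_ERROR);
 */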
5810
5811/* REG fifo error element */
5812struct reg_fifo_err {
5813	u32 err_code;
5814	const char *err_msg;
5815};
5816
5817/* IGU fifo element */
5818struct igu_fifo_element {
5819	u32 dword0;
5820#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5821#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5822#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5823#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5824#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5825#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5826#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5827#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5828#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5829#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5830	u32 dword1;
5831	u32 dword2;
5832#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5833#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5834#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5835#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5836	u32 reserved;
5837};
5838
5839struct igu_fifo_wr_data {
5840	u32 data;
5841#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5842#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5843#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5844#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5845#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5846#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5847#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5848#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5849#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5850#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5851#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5852#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5853};
5854
5855struct igu_fifo_cleanup_wr_data {
5856	u32 data;
5857#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5858#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5859#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5860#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5861#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5862#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5863#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5864#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5865};
5866
5867/* Protection override element */
5868struct protection_override_element {
5869	u64 data;
5870#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5871#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5872#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5873#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5874#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5875#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5876#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5877#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5878#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5879#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5880#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5881#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5882};
5883
5884enum igu_fifo_sources {
5885	IGU_SRC_PXP0,
5886	IGU_SRC_PXP1,
5887	IGU_SRC_PXP2,
5888	IGU_SRC_PXP3,
5889	IGU_SRC_PXP4,
5890	IGU_SRC_PXP5,
5891	IGU_SRC_PXP6,
5892	IGU_SRC_PXP7,
5893	IGU_SRC_CAU,
5894	IGU_SRC_ATTN,
5895	IGU_SRC_GRC
5896};
5897
5898enum igu_fifo_addr_types {
5899	IGU_ADDR_TYPE_MSIX_MEM,
5900	IGU_ADDR_TYPE_WRITE_PBA,
5901	IGU_ADDR_TYPE_WRITE_INT_ACK,
5902	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5903	IGU_ADDR_TYPE_READ_INT,
5904	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5905	IGU_ADDR_TYPE_RESERVED
5906};
5907
5908struct igu_fifo_addr_data {
5909	u16 start_addr;
5910	u16 end_addr;
5911	char *desc;
5912	char *vf_desc;
5913	enum igu_fifo_addr_types type;
5914};
5915
5916/******************************** Constants **********************************/
5917
5918#define MAX_MSG_LEN				1024
5919
5920#define MCP_TRACE_MAX_MODULE_LEN		8
5921#define MCP_TRACE_FORMAT_MAX_PARAMS		3
5922#define MCP_TRACE_FORMAT_PARAM_WIDTH \
5923	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
5924
5925#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5926#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5927
5928#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5929
5930/***************************** Constant Arrays *******************************/
5931
5932/* Status string array */
5933static const char * const s_status_str[] = {
5934	/* DBG_STATUS_OK */
5935	"Operation completed successfully",
5936
5937	/* DBG_STATUS_APP_VERSION_NOT_SET */
5938	"Debug application version wasn't set",
5939
5940	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5941	"Unsupported debug application version",
5942
5943	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5944	"The debug block wasn't reset since the last recording",
5945
5946	/* DBG_STATUS_INVALID_ARGS */
5947	"Invalid arguments",
5948
5949	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5950	"The debug output was already set",
5951
5952	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5953	"Invalid PCI buffer size",
5954
5955	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5956	"PCI buffer allocation failed",
5957
5958	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5959	"A PCI buffer wasn't allocated",
5960
5961	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
5962	"The filter/trigger constraint dword offsets are not enabled for recording",
5963	/* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
5964	"No matching framing mode",
5965
5966	/* DBG_STATUS_VFC_READ_ERROR */
5967	"Error reading from VFC",
5968
5969	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5970	"The Storm was already enabled",
5971
5972	/* DBG_STATUS_STORM_NOT_ENABLED */
5973	"The specified Storm wasn't enabled",
5974
5975	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5976	"The block was already enabled",
5977
5978	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5979	"The specified block wasn't enabled",
5980
5981	/* DBG_STATUS_NO_INPUT_ENABLED */
5982	"No input was enabled for recording",
5983
5984	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
5985	"Filters and triggers are not allowed in E4 256-bit mode",
5986
5987	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5988	"The filter was already enabled",
5989
5990	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5991	"The trigger was already enabled",
5992
5993	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5994	"The trigger wasn't enabled",
5995
5996	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5997	"A constraint can be added only after a filter was enabled or a trigger state was added",
5998
5999	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
6000	"Cannot add more than 3 trigger states",
6001
6002	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
6003	"Cannot add more than 4 constraints per filter or trigger state",
6004
6005	/* DBG_STATUS_RECORDING_NOT_STARTED */
6006	"The recording wasn't started",
6007
6008	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
6009	"A trigger was configured, but it didn't trigger",
6010
6011	/* DBG_STATUS_NO_DATA_RECORDED */
6012	"No data was recorded",
6013
6014	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
6015	"Dump buffer is too small",
6016
6017	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
6018	"Dumped data is not aligned to chunks",
6019
6020	/* DBG_STATUS_UNKNOWN_CHIP */
6021	"Unknown chip",
6022
6023	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
6024	"Failed allocating virtual memory",
6025
6026	/* DBG_STATUS_BLOCK_IN_RESET */
6027	"The input block is in reset",
6028
6029	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
6030	"Invalid MCP trace signature found in NVRAM",
6031
6032	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
6033	"Invalid bundle ID found in NVRAM",
6034
6035	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
6036	"Failed getting NVRAM image",
6037
6038	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
6039	"NVRAM image is not dword-aligned",
6040
6041	/* DBG_STATUS_NVRAM_READ_FAILED */
6042	"Failed reading from NVRAM",
6043
6044	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
6045	"Idle check parsing failed",
6046
6047	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
6048	"MCP Trace data is corrupt",
6049
6050	/* DBG_STATUS_MCP_TRACE_NO_META */
6051	"Dump doesn't contain meta data - it must be provided in image file",
6052
6053	/* DBG_STATUS_MCP_COULD_NOT_HALT */
6054	"Failed to halt MCP",
6055
6056	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
6057	"Failed to resume MCP after halt",
6058
6059	/* DBG_STATUS_RESERVED0 */
6060	"",
6061
6062	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
6063	"Failed to empty SEMI sync FIFO",
6064
6065	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
6066	"IGU FIFO data is corrupt",
6067
6068	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
6069	"MCP failed to mask parities",
6070
6071	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
6072	"FW Asserts parsing failed",
6073
6074	/* DBG_STATUS_REG_FIFO_BAD_DATA */
6075	"GRC FIFO data is corrupt",
6076
6077	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
6078	"Protection Override data is corrupt",
6079
6080	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
6081	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
6082
6083	/* DBG_STATUS_RESERVED1 */
6084	"",
6085
6086	/* DBG_STATUS_NON_MATCHING_LINES */
6087	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
6088
6089	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
6090	"Insufficient HW IDs. Try to record less Storms/blocks",
6091
6092	/* DBG_STATUS_DBG_BUS_IN_USE */
6093	"The debug bus is in use",
6094
6095	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
6096	"The storm debug mode is not supported in the current chip",
6097
6098	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
6099	"Other engine is supported only in BB",
6100
6101	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
6102	"The configured filter mode requires a single Storm/block input",
6103
6104	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
6105	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
6106
6107	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
6108	"When triggering on Storm data, the Storm to trigger on must be specified",
6109
6110	/* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
6111	"Failed to request MDUMP2 Offsize",
6112
6113	/* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
6114	"Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
6115
6116	/* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
6117	"Invalid Signature found at start of MDUMP2",
6118
6119	/* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
6120	"Invalid Log Size of MDUMP2",
6121
6122	/* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
6123	"Invalid Log Header of MDUMP2",
6124
6125	/* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
6126	"Invalid Log Data of MDUMP2",
6127
6128	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
6129	"Could not extract number of ports from regval buf of MDUMP2",
6130
6131	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
6132	"Could not extract MFW (link) status from regval buf of MDUMP2",
6133
6134	/* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
6135	"Could not display linkdump of MDUMP2",
6136
6137	/* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
6138	"Could not read PHY CFG of MDUMP2",
6139
6140	/* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
6141	"Could not read PLL Mode of MDUMP2",
6142
6143	/* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
6144	"Could not read TSCF/TSCE Lane Regs of MDUMP2",
6145
6146	/* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
6147	"Could not allocate MDUMP2 reg-val internal buffer"
6148};
6149
6150/* Idle check severity names array */
6151static const char * const s_idle_chk_severity_str[] = {
6152	"Error",
6153	"Error if no traffic",
6154	"Warning"
6155};
6156
6157/* MCP Trace level names array */
6158static const char * const s_mcp_trace_level_str[] = {
6159	"ERROR",
6160	"TRACE",
6161	"DEBUG"
6162};
6163
6164/* Access type names array */
6165static const char * const s_access_strs[] = {
6166	"read",
6167	"write"
6168};
6169
6170/* Privilege type names array */
6171static const char * const s_privilege_strs[] = {
6172	"VF",
6173	"PDA",
6174	"HV",
6175	"UA"
6176};
6177
6178/* Protection type names array */
6179static const char * const s_protection_strs[] = {
6180	"(default)",
6181	"(default)",
6182	"(default)",
6183	"(default)",
6184	"override VF",
6185	"override PDA",
6186	"override HV",
6187	"override UA"
6188};
6189
6190/* Master type names array */
6191static const char * const s_master_strs[] = {
6192	"???",
6193	"pxp",
6194	"mcp",
6195	"msdm",
6196	"psdm",
6197	"ysdm",
6198	"usdm",
6199	"tsdm",
6200	"xsdm",
6201	"dbu",
6202	"dmae",
6203	"jdap",
6204	"???",
6205	"???",
6206	"???",
6207	"???"
6208};
6209
6210/* REG FIFO error messages array */
6211static struct reg_fifo_err s_reg_fifo_errors[] = {
6212	{1, "grc timeout"},
6213	{2, "address doesn't belong to any block"},
6214	{4, "reserved address in block or write to read-only address"},
6215	{8, "privilege/protection mismatch"},
6216	{16, "path isolation error"},
6217	{17, "RSL error"}
6218};
6219
6220/* IGU FIFO sources array */
6221static const char * const s_igu_fifo_source_strs[] = {
6222	"TSTORM",
6223	"MSTORM",
6224	"USTORM",
6225	"XSTORM",
6226	"YSTORM",
6227	"PSTORM",
6228	"PCIE",
6229	"NIG_QM_PBF",
6230	"CAU",
6231	"ATTN",
6232	"GRC",
6233};
6234
6235/* IGU FIFO error messages */
6236static const char * const s_igu_fifo_error_strs[] = {
6237	"no error",
6238	"length error",
6239	"function disabled",
6240	"VF sent command to attention address",
6241	"host sent prod update command",
6242	"read of during interrupt register while in MIMD mode",
6243	"access to PXP BAR reserved address",
6244	"producer update command to attention index",
6245	"unknown error",
6246	"SB index not valid",
6247	"SB relative index and FID not found",
6248	"FID not match",
6249	"command with error flag asserted (PCI error or CAU discard)",
6250	"VF sent cleanup and RF cleanup is disabled",
6251	"cleanup command on type bigger than 4"
6252};
6253
6254/* IGU FIFO address data */
6255static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6256	{0x0, 0x101, "MSI-X Memory", NULL,
6257	 IGU_ADDR_TYPE_MSIX_MEM},
6258	{0x102, 0x1ff, "reserved", NULL,
6259	 IGU_ADDR_TYPE_RESERVED},
6260	{0x200, 0x200, "Write PBA[0:63]", NULL,
6261	 IGU_ADDR_TYPE_WRITE_PBA},
6262	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6263	 IGU_ADDR_TYPE_WRITE_PBA},
6264	{0x202, 0x202, "Write PBA[128]", "reserved",
6265	 IGU_ADDR_TYPE_WRITE_PBA},
6266	{0x203, 0x3ff, "reserved", NULL,
6267	 IGU_ADDR_TYPE_RESERVED},
6268	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6269	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6270	{0x5f0, 0x5f0, "Attention bits update", NULL,
6271	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6272	{0x5f1, 0x5f1, "Attention bits set", NULL,
6273	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6274	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6275	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6276	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6277	 IGU_ADDR_TYPE_READ_INT},
6278	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6279	 IGU_ADDR_TYPE_READ_INT},
6280	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6281	 IGU_ADDR_TYPE_READ_INT},
6282	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6283	 IGU_ADDR_TYPE_READ_INT},
6284	{0x5f7, 0x5ff, "reserved", NULL,
6285	 IGU_ADDR_TYPE_RESERVED},
6286	{0x600, 0x7ff, "Producer update", NULL,
6287	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6288};
6289
6290/******************************** Variables **********************************/
6291
6292/* Temporary buffer, used for print size calculations */
6293static char s_temp_buf[MAX_MSG_LEN];
6294
6295/**************************** Private Functions ******************************/
6296
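/* Empty on purpose; presumably serves as a placeholder for compile-time
 * asserts, like qed_static_asserts() called from qed_dbg_grc_dump() above.
 */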
6297static void qed_user_static_asserts(void)
6298{
6299}
6300
6301static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6302{
6303	return (a + b) % size;
6304}
6305
6306static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6307{
6308	return (size + a - b) % size;
6309}
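
/* Illustrative examples of the cyclic arithmetic above, for a buffer of
 * size 8:
 *
 *	qed_cyclic_add(7, 3, 8) == 2
 *	qed_cyclic_sub(1, 3, 8) == 6
 */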
6310
6311/* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6312 * bytes) and returns them as a dword value. The specified buffer offset is
6313 * updated.
6314 */
6315static u32 qed_read_from_cyclic_buf(void *buf,
6316				    u32 *offset,
6317				    u32 buf_size, u8 num_bytes_to_read)
6318{
6319	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6320	u32 val = 0;
6321
6322	val_ptr = (u8 *)&val;
6323
6324	/* Assume running on a LITTLE ENDIAN and the buffer is network order
6325	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
6326	 */
6327	for (i = 0; i < num_bytes_to_read; i++) {
6328		val_ptr[i] = bytes_buf[*offset];
6329		*offset = qed_cyclic_add(*offset, 1, buf_size);
6330	}
6331
6332	return val;
6333}
6334
6335/* Reads and returns the next byte from the specified buffer.
6336 * The specified buffer offset is updated.
6337 */
6338static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6339{
6340	return ((u8 *)buf)[(*offset)++];
6341}
6342
6343/* Reads and returns the next dword from the specified buffer.
6344 * The specified buffer offset is updated.
6345 */
6346static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6347{
6348	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6349
6350	*offset += 4;
6351
6352	return dword_val;
6353}
6354
6355/* Reads the next string from the specified buffer, and copies it to the
6356 * specified pointer. The specified buffer offset is updated.
6357 */
6358static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6359{
6360	const char *source_str = &((const char *)buf)[*offset];
6361
6362	strscpy(dest, source_str, size);
6363	*offset += size;
6364}
6365
6366/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6367 * If the specified buffer is NULL, a temporary buffer pointer is returned.
6368 */
6369static char *qed_get_buf_ptr(void *buf, u32 offset)
6370{
6371	return buf ? (char *)buf + offset : s_temp_buf;
6372}
6373
6374/* Reads a param from the specified buffer. Returns the number of dwords read.
6375 * If the returned param_str_val is NULL, the param is numeric and its
6376 * value is returned in param_num_val. Otherwise, the param is a string
6377 * and its pointer is returned in param_str_val.
6378 */
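/* Illustrative layout of a single dumped param, as parsed below (the value
 * field starts at the next dword-aligned offset for numeric params):
 *
 *	name string + '\0' | type byte | value
 *
 * A zero type byte means a numeric param whose value is a dword; a non-zero
 * type byte means a string param whose value is a '\0'-terminated string.
 */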
6379static u32 qed_read_param(u32 *dump_buf,
6380			  const char **param_name,
6381			  const char **param_str_val, u32 *param_num_val)
6382{
6383	char *char_buf = (char *)dump_buf;
6384	size_t offset = 0;
6385
6386	/* Extract param name */
6387	*param_name = char_buf;
6388	offset += strlen(*param_name) + 1;
6389
6390	/* Check param type */
6391	if (*(char_buf + offset++)) {
6392		/* String param */
6393		*param_str_val = char_buf + offset;
6394		*param_num_val = 0;
6395		offset += strlen(*param_str_val) + 1;
6396		if (offset & 0x3)
6397			offset += (4 - (offset & 0x3));
6398	} else {
6399		/* Numeric param */
6400		*param_str_val = NULL;
6401		if (offset & 0x3)
6402			offset += (4 - (offset & 0x3));
6403		*param_num_val = *(u32 *)(char_buf + offset);
6404		offset += 4;
6405	}
6406
6407	return (u32)offset / 4;
6408}
6409
6410/* Reads a section header from the specified buffer.
6411 * Returns the number of dwords read.
6412 */
6413static u32 qed_read_section_hdr(u32 *dump_buf,
6414				const char **section_name,
6415				u32 *num_section_params)
6416{
6417	const char *param_str_val;
6418
6419	return qed_read_param(dump_buf,
6420			      section_name, &param_str_val, num_section_params);
6421}
6422
6423/* Reads section params from the specified buffer and prints them to the results
6424 * buffer. Returns the number of dwords read.
6425 */
6426static u32 qed_print_section_params(u32 *dump_buf,
6427				    u32 num_section_params,
6428				    char *results_buf, u32 *num_chars_printed)
6429{
6430	u32 i, dump_offset = 0, results_offset = 0;
6431
6432	for (i = 0; i < num_section_params; i++) {
6433		const char *param_name, *param_str_val;
6434		u32 param_num_val = 0;
6435
6436		dump_offset += qed_read_param(dump_buf + dump_offset,
6437					      &param_name,
6438					      &param_str_val, &param_num_val);
6439
6440		if (param_str_val)
6441			results_offset +=
6442				sprintf(qed_get_buf_ptr(results_buf,
6443							results_offset),
6444					"%s: %s\n", param_name, param_str_val);
6445		else if (strcmp(param_name, "fw-timestamp"))
6446			results_offset +=
6447				sprintf(qed_get_buf_ptr(results_buf,
6448							results_offset),
6449					"%s: %d\n", param_name, param_num_val);
6450	}
6451
6452	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6453				  "\n");
6454
6455	*num_chars_printed = results_offset;
6456
6457	return dump_offset;
6458}
6459
6460/* Returns the block name that matches the specified block ID,
6461 * or NULL if not found.
6462 */
6463static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6464					  enum block_id block_id)
6465{
6466	const struct dbg_block_user *block =
6467	    (const struct dbg_block_user *)
6468	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6469
6470	return (const char *)block->name;
6471}
6472
6473static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6474							 *p_hwfn)
6475{
6476	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6477}
6478
6479/* Parses the idle check rules and returns the number of characters printed.
6480 * In case of parsing error, returns 0.
6481 */
6482static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6483					 u32 *dump_buf,
6484					 u32 *dump_buf_end,
6485					 u32 num_rules,
6486					 bool print_fw_idle_chk,
6487					 char *results_buf,
6488					 u32 *num_errors, u32 *num_warnings)
6489{
6490	/* Offset in results_buf in bytes */
6491	u32 results_offset = 0;
6492
6493	u32 rule_idx;
6494	u16 i, j;
6495
6496	*num_errors = 0;
6497	*num_warnings = 0;
6498
6499	/* Go over dumped results */
6500	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6501	     rule_idx++) {
6502		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6503		struct dbg_idle_chk_result_hdr *hdr;
6504		const char *parsing_str, *lsi_msg;
6505		u32 parsing_str_offset;
6506		bool has_fw_msg;
6507		u8 curr_reg_id;
6508
6509		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6510		rule_parsing_data =
6511		    (const struct dbg_idle_chk_rule_parsing_data *)
6512		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
6513		    hdr->rule_id;
6514		parsing_str_offset =
6515		    GET_FIELD(rule_parsing_data->data,
6516			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6517		has_fw_msg =
6518		    GET_FIELD(rule_parsing_data->data,
6519			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6520		parsing_str = (const char *)
6521		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
6522		    parsing_str_offset;
6523		lsi_msg = parsing_str;
6524		curr_reg_id = 0;
6525
6526		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6527			return 0;
6528
6529		/* Skip rule header */
6530		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6531
6532		/* Update errors/warnings count */
6533		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6534		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6535			(*num_errors)++;
6536		else
6537			(*num_warnings)++;
6538
6539		/* Print rule severity */
6540		results_offset +=
6541		    sprintf(qed_get_buf_ptr(results_buf,
6542					    results_offset), "%s: ",
6543			    s_idle_chk_severity_str[hdr->severity]);
6544
6545		/* Print rule message */
6546		if (has_fw_msg)
6547			parsing_str += strlen(parsing_str) + 1;
6548		results_offset +=
6549		    sprintf(qed_get_buf_ptr(results_buf,
6550					    results_offset), "%s.",
6551			    has_fw_msg &&
6552			    print_fw_idle_chk ? parsing_str : lsi_msg);
6553		parsing_str += strlen(parsing_str) + 1;
6554
6555		/* Print register values */
6556		results_offset +=
6557		    sprintf(qed_get_buf_ptr(results_buf,
6558					    results_offset), " Registers:");
6559		for (i = 0;
6560		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6561		     i++) {
6562			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6563			bool is_mem;
6564			u8 reg_id;
6565
6566			reg_hdr =
6567				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6568			is_mem = GET_FIELD(reg_hdr->data,
6569					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6570			reg_id = GET_FIELD(reg_hdr->data,
6571					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6572
6573			/* Skip reg header */
6574			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6575
6576			/* Skip register names until the required reg_id is
6577			 * reached.
6578			 */
6579			for (; reg_id > curr_reg_id; curr_reg_id++)
6580				parsing_str += strlen(parsing_str) + 1;
6581
6582			results_offset +=
6583			    sprintf(qed_get_buf_ptr(results_buf,
6584						    results_offset), " %s",
6585				    parsing_str);
6586			if (i < hdr->num_dumped_cond_regs && is_mem)
6587				results_offset +=
6588				    sprintf(qed_get_buf_ptr(results_buf,
6589							    results_offset),
6590					    "[%d]", hdr->mem_entry_id +
6591					    reg_hdr->start_entry);
6592			results_offset +=
6593			    sprintf(qed_get_buf_ptr(results_buf,
6594						    results_offset), "=");
6595			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6596				results_offset +=
6597				    sprintf(qed_get_buf_ptr(results_buf,
6598							    results_offset),
6599					    "0x%x", *dump_buf);
6600				if (j < reg_hdr->size - 1)
6601					results_offset +=
6602					    sprintf(qed_get_buf_ptr
6603						    (results_buf,
6604						     results_offset), ",");
6605			}
6606		}
6607
6608		results_offset +=
6609		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6610	}
6611
6612	/* Check if end of dump buffer was exceeded */
6613	if (dump_buf > dump_buf_end)
6614		return 0;
6615
6616	return results_offset;
6617}
6618
6619/* Parses an idle check dump buffer.
6620 * If results_buf is not NULL, the idle check results are printed to it.
6621 * In any case, the required results buffer size is assigned to
6622 * parsed_results_bytes.
6623 * The parsing status is returned.
6624 */
6625static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6626					       u32 *dump_buf,
6627					       u32 num_dumped_dwords,
6628					       char *results_buf,
6629					       u32 *parsed_results_bytes,
6630					       u32 *num_errors,
6631					       u32 *num_warnings)
6632{
6633	u32 num_section_params = 0, num_rules, num_rules_not_dumped;
6634	const char *section_name, *param_name, *param_str_val;
6635	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6636
6637	/* Offset in results_buf in bytes */
6638	u32 results_offset = 0;
6639
6640	*parsed_results_bytes = 0;
6641	*num_errors = 0;
6642	*num_warnings = 0;
6643
6644	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6645	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6646		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6647
6648	/* Read global_params section */
6649	dump_buf += qed_read_section_hdr(dump_buf,
6650					 &section_name, &num_section_params);
6651	if (strcmp(section_name, "global_params"))
6652		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6653
6654	/* Print global params */
6655	dump_buf += qed_print_section_params(dump_buf,
6656					     num_section_params,
6657					     results_buf, &results_offset);
6658
6659	/* Read idle_chk section
6660	 * There may be 1 or 2 idle_chk section parameters:
6661	 * - 1st is "num_rules"
6662	 * - 2nd is "num_rules_not_dumped" (optional)
6663	 */
6664
6665	dump_buf += qed_read_section_hdr(dump_buf,
6666					 &section_name, &num_section_params);
6667	if (strcmp(section_name, "idle_chk") ||
6668	    (num_section_params != 2 && num_section_params != 1))
6669		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6670	dump_buf += qed_read_param(dump_buf,
6671				   &param_name, &param_str_val, &num_rules);
6672	if (strcmp(param_name, "num_rules"))
6673		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6674	if (num_section_params > 1) {
6675		dump_buf += qed_read_param(dump_buf,
6676					   &param_name,
6677					   &param_str_val,
6678					   &num_rules_not_dumped);
6679		if (strcmp(param_name, "num_rules_not_dumped"))
6680			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6681	} else {
6682		num_rules_not_dumped = 0;
6683	}
6684
6685	if (num_rules) {
6686		u32 rules_print_size;
6687
6688		/* Print FW output */
6689		results_offset +=
6690		    sprintf(qed_get_buf_ptr(results_buf,
6691					    results_offset),
6692			    "FW_IDLE_CHECK:\n");
6693		rules_print_size =
6694			qed_parse_idle_chk_dump_rules(p_hwfn,
6695						      dump_buf,
6696						      dump_buf_end,
6697						      num_rules,
6698						      true,
6699						      results_buf ?
6700						      results_buf +
6701						      results_offset :
6702						      NULL,
6703						      num_errors,
6704						      num_warnings);
6705		results_offset += rules_print_size;
6706		if (!rules_print_size)
6707			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6708
6709		/* Print LSI output */
6710		results_offset +=
6711		    sprintf(qed_get_buf_ptr(results_buf,
6712					    results_offset),
6713			    "\nLSI_IDLE_CHECK:\n");
6714		rules_print_size =
6715			qed_parse_idle_chk_dump_rules(p_hwfn,
6716						      dump_buf,
6717						      dump_buf_end,
6718						      num_rules,
6719						      false,
6720						      results_buf ?
6721						      results_buf +
6722						      results_offset :
6723						      NULL,
6724						      num_errors,
6725						      num_warnings);
6726		results_offset += rules_print_size;
6727		if (!rules_print_size)
6728			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6729	}
6730
6731	/* Print errors/warnings count */
6732	if (*num_errors)
6733		results_offset +=
6734		    sprintf(qed_get_buf_ptr(results_buf,
6735					    results_offset),
6736			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6737			    *num_errors, *num_warnings);
6738	else if (*num_warnings)
6739		results_offset +=
6740		    sprintf(qed_get_buf_ptr(results_buf,
6741					    results_offset),
6742			    "\nIdle Check completed successfully (with %d warnings)\n",
6743			    *num_warnings);
6744	else
6745		results_offset +=
6746		    sprintf(qed_get_buf_ptr(results_buf,
6747					    results_offset),
6748			    "\nIdle Check completed successfully\n");
6749
6750	if (num_rules_not_dumped)
6751		results_offset +=
6752		    sprintf(qed_get_buf_ptr(results_buf,
6753					    results_offset),
6754			    "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
6755			    num_rules_not_dumped);
6756
6757	/* Add 1 for string NULL termination */
6758	*parsed_results_bytes = results_offset + 1;
6759
6760	return DBG_STATUS_OK;
6761}
6762
6763/* Allocates and fills MCP Trace meta data based on the specified meta data
6764 * dump buffer.
6765 * Returns debug status code.
6766 */
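/* Illustrative layout of the meta data buffer parsed below:
 *
 *	signature (dword) | modules_num (byte) |
 *	modules_num x { module_len (byte), module string } |
 *	signature (dword) | formats_num (dword) |
 *	formats_num x { format data (dword), format string }
 */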
6767static enum dbg_status
6768qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6769			      const u32 *meta_buf)
6770{
6771	struct dbg_tools_user_data *dev_user_data;
6772	u32 offset = 0, signature, i;
6773	struct mcp_trace_meta *meta;
6774	u8 *meta_buf_bytes;
6775
6776	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6777	meta = &dev_user_data->mcp_trace_meta;
6778	meta_buf_bytes = (u8 *)meta_buf;
6779
6780	/* Free the previous meta before loading a new one. */
6781	if (meta->is_allocated)
6782		qed_mcp_trace_free_meta_data(p_hwfn);
6783
6784	memset(meta, 0, sizeof(*meta));
6785
6786	/* Read first signature */
6787	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6788	if (signature != NVM_MAGIC_VALUE)
6789		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6790
6791	/* Read no. of modules and allocate memory for their pointers */
6792	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6793	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6794				GFP_KERNEL);
6795	if (!meta->modules)
6796		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6797
6798	/* Allocate and read all module strings */
6799	for (i = 0; i < meta->modules_num; i++) {
6800		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6801
6802		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6803		if (!(*(meta->modules + i))) {
6804			/* Update number of modules to be released */
6805			meta->modules_num = i ? i - 1 : 0;
6806			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6807		}
6808
6809		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6810				      *(meta->modules + i));
6811		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6812			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6813	}
6814
6815	/* Read second signature */
6816	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6817	if (signature != NVM_MAGIC_VALUE)
6818		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6819
6820	/* Read number of formats and allocate memory for all formats */
6821	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6822	meta->formats = kcalloc(meta->formats_num,
6823				sizeof(struct mcp_trace_format),
6824				GFP_KERNEL);
6825	if (!meta->formats)
6826		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6827
6828	/* Allocate and read all strings */
6829	for (i = 0; i < meta->formats_num; i++) {
6830		struct mcp_trace_format *format_ptr = &meta->formats[i];
6831		u8 format_len;
6832
6833		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6834							   &offset);
6835		format_len = GET_MFW_FIELD(format_ptr->data,
6836					   MCP_TRACE_FORMAT_LEN);
6837		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6838		if (!format_ptr->format_str) {
6839			/* Update number of formats to be released */
6840			meta->formats_num = i ? i - 1 : 0;
6841			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6842		}
6843
6844		qed_read_str_from_buf(meta_buf_bytes,
6845				      &offset,
6846				      format_len, format_ptr->format_str);
6847	}
6848
6849	meta->is_allocated = true;
6850	return DBG_STATUS_OK;
6851}
6852
6853/* Parses an MCP trace buffer. If parsed_buf is not NULL, the MCP Trace
6854 * results are printed to it. The parsing status is returned.
6855 * Arguments:
6856 * trace_buf - MCP trace cyclic buffer
6857 * trace_buf_size - MCP trace cyclic buffer size in bytes
6858 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6859 *		 buffer.
6860 * data_size - size in bytes of data to parse.
6861 * parsed_buf - destination buffer for parsed data.
6862 * parsed_results_bytes - size of parsed data in bytes.
6863 */
6864static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6865					       u8 *trace_buf,
6866					       u32 trace_buf_size,
6867					       u32 data_offset,
6868					       u32 data_size,
6869					       char *parsed_buf,
6870					       u32 *parsed_results_bytes)
6871{
6872	struct dbg_tools_user_data *dev_user_data;
6873	struct mcp_trace_meta *meta;
6874	u32 param_mask, param_shift;
6875	enum dbg_status status;
6876
6877	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6878	meta = &dev_user_data->mcp_trace_meta;
6879	*parsed_results_bytes = 0;
6880
6881	if (!meta->is_allocated)
6882		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6883
6884	status = DBG_STATUS_OK;
6885
6886	while (data_size) {
6887		struct mcp_trace_format *format_ptr;
6888		u8 format_level, format_module;
6889		u32 params[3] = { 0, 0, 0 };
6890		u32 header, format_idx, i;
6891
6892		if (data_size < MFW_TRACE_ENTRY_SIZE)
6893			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6894
6895		header = qed_read_from_cyclic_buf(trace_buf,
6896						  &data_offset,
6897						  trace_buf_size,
6898						  MFW_TRACE_ENTRY_SIZE);
6899		data_size -= MFW_TRACE_ENTRY_SIZE;
6900		format_idx = header & MFW_TRACE_EVENTID_MASK;
6901
6902		/* Skip message if its index doesn't exist in the meta data */
6903		if (format_idx >= meta->formats_num) {
6904			u8 format_size = (u8)GET_MFW_FIELD(header,
6905							   MFW_TRACE_PRM_SIZE);
6906
6907			if (data_size < format_size)
6908				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6909
6910			data_offset = qed_cyclic_add(data_offset,
6911						     format_size,
6912						     trace_buf_size);
6913			data_size -= format_size;
6914			continue;
6915		}
6916
6917		format_ptr = &meta->formats[format_idx];
6918
6919		for (i = 0,
6920		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6921		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6922		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6923		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6924		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6925			/* Extract param size (0..3) */
6926			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6927					     param_shift);
6928
6929			/* If the param size is zero, there are no other
6930			 * parameters.
6931			 */
6932			if (!param_size)
6933				break;
6934
6935			/* Size is encoded using 2 bits, where 3 is used to
6936			 * encode 4.
6937			 */
6938			if (param_size == 3)
6939				param_size = 4;
6940
6941			if (data_size < param_size)
6942				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6943
6944			params[i] = qed_read_from_cyclic_buf(trace_buf,
6945							     &data_offset,
6946							     trace_buf_size,
6947							     param_size);
6948			data_size -= param_size;
6949		}
6950
6951		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6952						 MCP_TRACE_FORMAT_LEVEL);
6953		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6954						  MCP_TRACE_FORMAT_MODULE);
6955		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6956			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6957
6958		/* Print current message to results buffer */
6959		*parsed_results_bytes +=
6960			sprintf(qed_get_buf_ptr(parsed_buf,
6961						*parsed_results_bytes),
6962				"%s %-8s: ",
6963				s_mcp_trace_level_str[format_level],
6964				meta->modules[format_module]);
6965		*parsed_results_bytes +=
6966		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6967			    format_ptr->format_str,
6968			    params[0], params[1], params[2]);
6969	}
6970
6971	/* Add string NULL terminator */
6972	(*parsed_results_bytes)++;
6973
6974	return status;
6975}
6976
6977/* Parses an MCP Trace dump buffer.
6978 * If results_buf is not NULL, the MCP Trace results are printed to it.
6979 * In any case, the required results buffer size is assigned to
6980 * parsed_results_bytes.
6981 * The parsing status is returned.
6982 */
6983static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6984						u32 *dump_buf,
6985						char *results_buf,
6986						u32 *parsed_results_bytes,
6987						bool free_meta_data)
6988{
6989	const char *section_name, *param_name, *param_str_val;
6990	u32 data_size, trace_data_dwords, trace_meta_dwords;
6991	u32 offset, results_offset, results_buf_bytes;
6992	u32 param_num_val, num_section_params;
6993	struct mcp_trace *trace;
6994	enum dbg_status status;
6995	const u32 *meta_buf;
6996	u8 *trace_buf;
6997
6998	*parsed_results_bytes = 0;
6999
7000	/* Read global_params section */
7001	dump_buf += qed_read_section_hdr(dump_buf,
7002					 &section_name, &num_section_params);
7003	if (strcmp(section_name, "global_params"))
7004		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7005
7006	/* Print global params */
7007	dump_buf += qed_print_section_params(dump_buf,
7008					     num_section_params,
7009					     results_buf, &results_offset);
7010
7011	/* Read trace_data section */
7012	dump_buf += qed_read_section_hdr(dump_buf,
7013					 &section_name, &num_section_params);
7014	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
7015		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7016	dump_buf += qed_read_param(dump_buf,
7017				   &param_name, &param_str_val, &param_num_val);
7018	if (strcmp(param_name, "size"))
7019		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7020	trace_data_dwords = param_num_val;
7021
7022	/* Prepare trace info */
7023	trace = (struct mcp_trace *)dump_buf;
7024	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
7025		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7026
7027	trace_buf = (u8 *)dump_buf + sizeof(*trace);
7028	offset = trace->trace_oldest;
7029	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
7030	dump_buf += trace_data_dwords;
7031
7032	/* Read meta_data section */
7033	dump_buf += qed_read_section_hdr(dump_buf,
7034					 &section_name, &num_section_params);
7035	if (strcmp(section_name, "mcp_trace_meta"))
7036		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7037	dump_buf += qed_read_param(dump_buf,
7038				   &param_name, &param_str_val, &param_num_val);
7039	if (strcmp(param_name, "size"))
7040		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7041	trace_meta_dwords = param_num_val;
7042
7043	/* Choose meta data buffer */
7044	if (!trace_meta_dwords) {
7045		/* Dump doesn't include meta data */
7046		struct dbg_tools_user_data *dev_user_data =
7047			qed_dbg_get_user_data(p_hwfn);
7048
7049		if (!dev_user_data->mcp_trace_user_meta_buf)
7050			return DBG_STATUS_MCP_TRACE_NO_META;
7051
7052		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
7053	} else {
7054		/* Dump includes meta data */
7055		meta_buf = dump_buf;
7056	}
7057
7058	/* Allocate meta data memory */
7059	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
7060	if (status != DBG_STATUS_OK)
7061		return status;
7062
7063	status = qed_parse_mcp_trace_buf(p_hwfn,
7064					 trace_buf,
7065					 trace->size,
7066					 offset,
7067					 data_size,
7068					 results_buf ?
7069					 results_buf + results_offset :
7070					 NULL,
7071					 &results_buf_bytes);
7072	if (status != DBG_STATUS_OK)
7073		return status;
7074
7075	if (free_meta_data)
7076		qed_mcp_trace_free_meta_data(p_hwfn);
7077
7078	*parsed_results_bytes = results_offset + results_buf_bytes;
7079
7080	return DBG_STATUS_OK;
7081}
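
/* Illustrative sketch (not compiled): the dump buffer layout expected by
 * qed_parse_mcp_trace_dump() above, reconstructed from its parsing logic.
 * Sizes are in dwords unless noted otherwise.
 *
 *   "global_params" section header + params        (printed as-is)
 *   "mcp_trace_data" section header, param "size" = trace_data_dwords
 *       struct mcp_trace header                    (signature, size,
 *                                                   trace_prod, trace_oldest)
 *       cyclic trace buffer                        (trace->size bytes)
 *   "mcp_trace_meta" section header, param "size" = trace_meta_dwords
 *       meta data image, or 0 dwords if the meta data is supplied
 *       externally via qed_dbg_mcp_trace_set_meta_data()
 */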
7082
7083/* Parses a Reg FIFO dump buffer.
7084 * If results_buf is not NULL, the Reg FIFO results are printed to it.
7085 * In any case, the required results buffer size is assigned to
7086 * parsed_results_bytes.
7087 * The parsing status is returned.
7088 */
7089static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
7090					       char *results_buf,
7091					       u32 *parsed_results_bytes)
7092{
7093	const char *section_name, *param_name, *param_str_val;
7094	u32 param_num_val, num_section_params, num_elements;
7095	struct reg_fifo_element *elements;
7096	u8 i, j, err_code, vf_val;
7097	u32 results_offset = 0;
7098	char vf_str[4];
7099
7100	/* Read global_params section */
7101	dump_buf += qed_read_section_hdr(dump_buf,
7102					 &section_name, &num_section_params);
7103	if (strcmp(section_name, "global_params"))
7104		return DBG_STATUS_REG_FIFO_BAD_DATA;
7105
7106	/* Print global params */
7107	dump_buf += qed_print_section_params(dump_buf,
7108					     num_section_params,
7109					     results_buf, &results_offset);
7110
7111	/* Read reg_fifo_data section */
7112	dump_buf += qed_read_section_hdr(dump_buf,
7113					 &section_name, &num_section_params);
7114	if (strcmp(section_name, "reg_fifo_data"))
7115		return DBG_STATUS_REG_FIFO_BAD_DATA;
7116	dump_buf += qed_read_param(dump_buf,
7117				   &param_name, &param_str_val, &param_num_val);
7118	if (strcmp(param_name, "size"))
7119		return DBG_STATUS_REG_FIFO_BAD_DATA;
7120	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
7121		return DBG_STATUS_REG_FIFO_BAD_DATA;
7122	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
7123	elements = (struct reg_fifo_element *)dump_buf;
7124
7125	/* Decode elements */
7126	for (i = 0; i < num_elements; i++) {
7127		const char *err_msg = NULL;
7128
7129		/* Discover if element belongs to a VF or a PF */
7130		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
7131		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
7132			sprintf(vf_str, "%s", "N/A");
7133		else
7134			sprintf(vf_str, "%d", vf_val);
7135
7136		/* Find error message */
7137		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
7138		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
7139			if (err_code == s_reg_fifo_errors[j].err_code)
7140				err_msg = s_reg_fifo_errors[j].err_msg;
7141
7142		/* Add parsed element to parsed buffer */
7143		results_offset +=
7144		    sprintf(qed_get_buf_ptr(results_buf,
7145					    results_offset),
7146			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
7147			    elements[i].data,
7148			    (u32)GET_FIELD(elements[i].data,
7149					   REG_FIFO_ELEMENT_ADDRESS) *
7150			    REG_FIFO_ELEMENT_ADDR_FACTOR,
7151			    s_access_strs[GET_FIELD(elements[i].data,
7152						    REG_FIFO_ELEMENT_ACCESS)],
7153			    (u32)GET_FIELD(elements[i].data,
7154					   REG_FIFO_ELEMENT_PF),
7155			    vf_str,
7156			    (u32)GET_FIELD(elements[i].data,
7157					   REG_FIFO_ELEMENT_PORT),
7158			    s_privilege_strs[GET_FIELD(elements[i].data,
7159						REG_FIFO_ELEMENT_PRIVILEGE)],
7160			    s_protection_strs[GET_FIELD(elements[i].data,
7161						REG_FIFO_ELEMENT_PROTECTION)],
7162			    s_master_strs[GET_FIELD(elements[i].data,
7163						    REG_FIFO_ELEMENT_MASTER)],
7164			    err_msg ? err_msg : "unknown error code");
7165	}
7166
7167	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7168						  results_offset),
7169				  "fifo contained %d elements", num_elements);
7170
7171	/* Add 1 for string NULL termination */
7172	*parsed_results_bytes = results_offset + 1;
7173
7174	return DBG_STATUS_OK;
7175}
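
/* Illustrative sketch (not compiled): the dump buffer layout expected by
 * qed_parse_reg_fifo_dump() above. qed_parse_igu_fifo_dump() and
 * qed_parse_protection_override_dump() below follow the same pattern, with
 * their own data section names and element sizes.
 *
 *   "global_params" section header + params        (printed as-is)
 *   "reg_fifo_data" section header, param "size" = N dwords
 *       N / REG_FIFO_ELEMENT_DWORDS elements       (struct reg_fifo_element)
 */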
7176
7177static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
7178						  *element, char
7179						  *results_buf,
7180						  u32 *results_offset)
7181{
7182	const struct igu_fifo_addr_data *found_addr = NULL;
7183	u8 source, err_type, i, is_cleanup;
7184	char parsed_addr_data[32];
7185	char parsed_wr_data[256];
7186	u32 wr_data, prod_cons;
7187	bool is_wr_cmd, is_pf;
7188	u16 cmd_addr;
7189	u64 dword12;
7190
7191	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7192	 * FIFO element.
7193	 */
7194	dword12 = ((u64)element->dword2 << 32) | element->dword1;
7195	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7196	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7197	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7198	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7199	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7200
7201	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7202		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7203	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7204		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7205
7206	/* Find address data */
7207	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7208		const struct igu_fifo_addr_data *curr_addr =
7209			&s_igu_fifo_addr_data[i];
7210
7211		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7212		    curr_addr->end_addr)
7213			found_addr = curr_addr;
7214	}
7215
7216	if (!found_addr)
7217		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7218
7219	/* Prepare parsed address data */
7220	switch (found_addr->type) {
7221	case IGU_ADDR_TYPE_MSIX_MEM:
7222		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7223		break;
7224	case IGU_ADDR_TYPE_WRITE_INT_ACK:
7225	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7226		sprintf(parsed_addr_data,
7227			" SB = 0x%x", cmd_addr - found_addr->start_addr);
7228		break;
7229	default:
7230		parsed_addr_data[0] = '\0';
7231	}
7232
7233	if (!is_wr_cmd) {
7234		parsed_wr_data[0] = '\0';
7235		goto out;
7236	}
7237
7238	/* Prepare parsed write data */
7239	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7240	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7241	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7242
7243	if (source == IGU_SRC_ATTN) {
7244		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7245	} else {
7246		if (is_cleanup) {
7247			u8 cleanup_val, cleanup_type;
7248
7249			cleanup_val =
7250				GET_FIELD(wr_data,
7251					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7252			cleanup_type =
7253			    GET_FIELD(wr_data,
7254				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7255
7256			sprintf(parsed_wr_data,
7257				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7258				cleanup_val ? "set" : "clear",
7259				cleanup_type);
7260		} else {
7261			u8 update_flag, en_dis_int_for_sb, segment;
7262			u8 timer_mask;
7263
7264			update_flag = GET_FIELD(wr_data,
7265						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7266			en_dis_int_for_sb =
7267				GET_FIELD(wr_data,
7268					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7269			segment = GET_FIELD(wr_data,
7270					    IGU_FIFO_WR_DATA_SEGMENT);
7271			timer_mask = GET_FIELD(wr_data,
7272					       IGU_FIFO_WR_DATA_TIMER_MASK);
7273
7274			sprintf(parsed_wr_data,
7275				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7276				prod_cons,
7277				update_flag ? "update" : "nop",
7278				en_dis_int_for_sb ?
7279				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7280				"enable",
7281				segment ? "attn" : "regular",
7282				timer_mask);
7283		}
7284	}
7285out:
7286	/* Add parsed element to parsed buffer */
7287	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7288						   *results_offset),
7289				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7290				   element->dword2, element->dword1,
7291				   element->dword0,
7292				   is_pf ? "pf" : "vf",
7293				   GET_FIELD(element->dword0,
7294					     IGU_FIFO_ELEMENT_DWORD0_FID),
7295				   s_igu_fifo_source_strs[source],
7296				   is_wr_cmd ? "wr" : "rd",
7297				   cmd_addr,
7298				   (!is_pf && found_addr->vf_desc)
7299				   ? found_addr->vf_desc
7300				   : found_addr->desc,
7301				   parsed_addr_data,
7302				   parsed_wr_data,
7303				   s_igu_fifo_error_strs[err_type]);
7304
7305	return DBG_STATUS_OK;
7306}
7307
7308/* Parses an IGU FIFO dump buffer.
7309 * If results_buf is not NULL, the IGU FIFO results are printed to it.
7310 * In any case, the required results buffer size is assigned to
7311 * parsed_results_bytes.
7312 * The parsing status is returned.
7313 */
7314static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7315					       char *results_buf,
7316					       u32 *parsed_results_bytes)
7317{
7318	const char *section_name, *param_name, *param_str_val;
7319	u32 param_num_val, num_section_params, num_elements;
7320	struct igu_fifo_element *elements;
7321	enum dbg_status status;
7322	u32 results_offset = 0;
7323	u8 i;
7324
7325	/* Read global_params section */
7326	dump_buf += qed_read_section_hdr(dump_buf,
7327					 &section_name, &num_section_params);
7328	if (strcmp(section_name, "global_params"))
7329		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7330
7331	/* Print global params */
7332	dump_buf += qed_print_section_params(dump_buf,
7333					     num_section_params,
7334					     results_buf, &results_offset);
7335
7336	/* Read igu_fifo_data section */
7337	dump_buf += qed_read_section_hdr(dump_buf,
7338					 &section_name, &num_section_params);
7339	if (strcmp(section_name, "igu_fifo_data"))
7340		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7341	dump_buf += qed_read_param(dump_buf,
7342				   &param_name, &param_str_val, &param_num_val);
7343	if (strcmp(param_name, "size"))
7344		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7345	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7346		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7347	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7348	elements = (struct igu_fifo_element *)dump_buf;
7349
7350	/* Decode elements */
7351	for (i = 0; i < num_elements; i++) {
7352		status = qed_parse_igu_fifo_element(&elements[i],
7353						    results_buf,
7354						    &results_offset);
7355		if (status != DBG_STATUS_OK)
7356			return status;
7357	}
7358
7359	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7360						  results_offset),
7361				  "fifo contained %d elements", num_elements);
7362
7363	/* Add 1 for string NULL termination */
7364	*parsed_results_bytes = results_offset + 1;
7365
7366	return DBG_STATUS_OK;
7367}
7368
7369static enum dbg_status
7370qed_parse_protection_override_dump(u32 *dump_buf,
7371				   char *results_buf,
7372				   u32 *parsed_results_bytes)
7373{
7374	const char *section_name, *param_name, *param_str_val;
7375	u32 param_num_val, num_section_params, num_elements;
7376	struct protection_override_element *elements;
7377	u32 results_offset = 0;
7378	u8 i;
7379
7380	/* Read global_params section */
7381	dump_buf += qed_read_section_hdr(dump_buf,
7382					 &section_name, &num_section_params);
7383	if (strcmp(section_name, "global_params"))
7384		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7385
7386	/* Print global params */
7387	dump_buf += qed_print_section_params(dump_buf,
7388					     num_section_params,
7389					     results_buf, &results_offset);
7390
7391	/* Read protection_override_data section */
7392	dump_buf += qed_read_section_hdr(dump_buf,
7393					 &section_name, &num_section_params);
7394	if (strcmp(section_name, "protection_override_data"))
7395		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7396	dump_buf += qed_read_param(dump_buf,
7397				   &param_name, &param_str_val, &param_num_val);
7398	if (strcmp(param_name, "size"))
7399		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7400	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7401		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7402	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7403	elements = (struct protection_override_element *)dump_buf;
7404
7405	/* Decode elements */
7406	for (i = 0; i < num_elements; i++) {
7407		u32 address = GET_FIELD(elements[i].data,
7408					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7409			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7410
7411		results_offset +=
7412		    sprintf(qed_get_buf_ptr(results_buf,
7413					    results_offset),
7414			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7415			    i, address,
7416			    (u32)GET_FIELD(elements[i].data,
7417				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7418			    (u32)GET_FIELD(elements[i].data,
7419				      PROTECTION_OVERRIDE_ELEMENT_READ),
7420			    (u32)GET_FIELD(elements[i].data,
7421				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7422			    s_protection_strs[GET_FIELD(elements[i].data,
7423				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7424			    s_protection_strs[GET_FIELD(elements[i].data,
7425				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7426	}
7427
7428	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7429						  results_offset),
7430				  "protection override contained %d elements",
7431				  num_elements);
7432
7433	/* Add 1 for string NULL termination */
7434	*parsed_results_bytes = results_offset + 1;
7435
7436	return DBG_STATUS_OK;
7437}
7438
7439/* Parses a FW Asserts dump buffer.
7440 * If results_buf is not NULL, the FW Asserts results are printed to it.
7441 * In any case, the required results buffer size is assigned to
7442 * parsed_results_bytes.
7443 * The parsing status is returned.
7444 */
7445static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7446						 char *results_buf,
7447						 u32 *parsed_results_bytes)
7448{
7449	u32 num_section_params, param_num_val, i, results_offset = 0;
7450	const char *param_name, *param_str_val, *section_name;
7451	bool last_section_found = false;
7452
7453	*parsed_results_bytes = 0;
7454
7455	/* Read global_params section */
7456	dump_buf += qed_read_section_hdr(dump_buf,
7457					 &section_name, &num_section_params);
7458	if (strcmp(section_name, "global_params"))
7459		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7460
7461	/* Print global params */
7462	dump_buf += qed_print_section_params(dump_buf,
7463					     num_section_params,
7464					     results_buf, &results_offset);
7465
7466	while (!last_section_found) {
7467		dump_buf += qed_read_section_hdr(dump_buf,
7468						 &section_name,
7469						 &num_section_params);
7470		if (!strcmp(section_name, "fw_asserts")) {
7471			/* Extract params */
7472			const char *storm_letter = NULL;
7473			u32 storm_dump_size = 0;
7474
7475			for (i = 0; i < num_section_params; i++) {
7476				dump_buf += qed_read_param(dump_buf,
7477							   &param_name,
7478							   &param_str_val,
7479							   &param_num_val);
7480				if (!strcmp(param_name, "storm"))
7481					storm_letter = param_str_val;
7482				else if (!strcmp(param_name, "size"))
7483					storm_dump_size = param_num_val;
7484				else
7485					return
7486					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7487			}
7488
7489			if (!storm_letter || !storm_dump_size)
7490				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7491
7492			/* Print data */
7493			results_offset +=
7494			    sprintf(qed_get_buf_ptr(results_buf,
7495						    results_offset),
7496				    "\n%sSTORM_ASSERT: size=%d\n",
7497				    storm_letter, storm_dump_size);
7498			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7499				results_offset +=
7500				    sprintf(qed_get_buf_ptr(results_buf,
7501							    results_offset),
7502					    "%08x\n", *dump_buf);
7503		} else if (!strcmp(section_name, "last")) {
7504			last_section_found = true;
7505		} else {
7506			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7507		}
7508	}
7509
7510	/* Add 1 for string NULL termination */
7511	*parsed_results_bytes = results_offset + 1;
7512
7513	return DBG_STATUS_OK;
7514}
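
/* Illustrative sketch (not compiled): the dump buffer layout expected by
 * qed_parse_fw_asserts_dump() above, reconstructed from its parsing logic.
 *
 *   "global_params" section header + params        (printed as-is)
 *   zero or more "fw_asserts" section headers, each with params
 *       "storm" (storm letter) and "size" (dwords of assert data),
 *       followed by <size> raw dwords
 *   "last" section header, terminating the dump
 */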
7515
7516/***************************** Public Functions *******************************/
7517
7518enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7519					 const u8 * const bin_ptr)
7520{
7521	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7522	u8 buf_id;
7523
7524	/* Convert binary data to debug arrays */
7525	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7526		qed_set_dbg_bin_buf(p_hwfn,
7527				    (enum bin_dbg_buffer_type)buf_id,
7528				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7529				    buf_hdrs[buf_id].length);
7530
7531	return DBG_STATUS_OK;
7532}
7533
7534enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7535					void **user_data_ptr)
7536{
7537	*user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7538				 GFP_KERNEL);
7539	if (!(*user_data_ptr))
7540		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7541
7542	return DBG_STATUS_OK;
7543}
7544
7545const char *qed_dbg_get_status_str(enum dbg_status status)
7546{
7547	return (status <
7548		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7549}
7550
7551enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7552						  u32 *dump_buf,
7553						  u32 num_dumped_dwords,
7554						  u32 *results_buf_size)
7555{
7556	u32 num_errors, num_warnings;
7557
7558	return qed_parse_idle_chk_dump(p_hwfn,
7559				       dump_buf,
7560				       num_dumped_dwords,
7561				       NULL,
7562				       results_buf_size,
7563				       &num_errors, &num_warnings);
7564}
7565
7566enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7567					   u32 *dump_buf,
7568					   u32 num_dumped_dwords,
7569					   char *results_buf,
7570					   u32 *num_errors,
7571					   u32 *num_warnings)
7572{
7573	u32 parsed_buf_size;
7574
7575	return qed_parse_idle_chk_dump(p_hwfn,
7576				       dump_buf,
7577				       num_dumped_dwords,
7578				       results_buf,
7579				       &parsed_buf_size,
7580				       num_errors, num_warnings);
7581}
7582
7583void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7584				     const u32 *meta_buf)
7585{
7586	struct dbg_tools_user_data *dev_user_data =
7587		qed_dbg_get_user_data(p_hwfn);
7588
7589	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7590}
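
/* Illustrative sketch (not compiled): a caller that parses MCP Trace dumps
 * collected without embedded meta data must register an external meta image
 * first, otherwise qed_parse_mcp_trace_dump() returns
 * DBG_STATUS_MCP_TRACE_NO_META. The meta_image buffer below is hypothetical.
 *
 *	qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_image);
 *	status = qed_print_mcp_trace_results_cont(p_hwfn, dump_buf,
 *						  results_buf);
 */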
7591
7592enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7593						   u32 *dump_buf,
7594						   u32 num_dumped_dwords,
7595						   u32 *results_buf_size)
7596{
7597	return qed_parse_mcp_trace_dump(p_hwfn,
7598					dump_buf, NULL, results_buf_size, true);
7599}
7600
7601enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7602					    u32 *dump_buf,
7603					    u32 num_dumped_dwords,
7604					    char *results_buf)
7605{
7606	u32 parsed_buf_size;
7607
7608	/* Doesn't do anything; needed only for compile-time asserts */
7609	qed_user_static_asserts();
7610
7611	return qed_parse_mcp_trace_dump(p_hwfn,
7612					dump_buf,
7613					results_buf, &parsed_buf_size, true);
7614}
7615
7616enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7617						 u32 *dump_buf,
7618						 char *results_buf)
7619{
7620	u32 parsed_buf_size;
7621
7622	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7623					&parsed_buf_size, false);
7624}
7625
7626enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7627					 u8 *dump_buf,
7628					 u32 num_dumped_bytes,
7629					 char *results_buf)
7630{
7631	u32 parsed_results_bytes;
7632
7633	return qed_parse_mcp_trace_buf(p_hwfn,
7634				       dump_buf,
7635				       num_dumped_bytes,
7636				       0,
7637				       num_dumped_bytes,
7638				       results_buf, &parsed_results_bytes);
7639}
7640
7641/* Frees the specified MCP Trace meta data */
7642void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7643{
7644	struct dbg_tools_user_data *dev_user_data;
7645	struct mcp_trace_meta *meta;
7646	u32 i;
7647
7648	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7649	meta = &dev_user_data->mcp_trace_meta;
7650	if (!meta->is_allocated)
7651		return;
7652
7653	/* Release modules */
7654	if (meta->modules) {
7655		for (i = 0; i < meta->modules_num; i++)
7656			kfree(meta->modules[i]);
7657		kfree(meta->modules);
7658	}
7659
7660	/* Release formats */
7661	if (meta->formats) {
7662		for (i = 0; i < meta->formats_num; i++)
7663			kfree(meta->formats[i].format_str);
7664		kfree(meta->formats);
7665	}
7666
7667	meta->is_allocated = false;
7668}
7669
7670enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7671						  u32 *dump_buf,
7672						  u32 num_dumped_dwords,
7673						  u32 *results_buf_size)
7674{
7675	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7676}
7677
7678enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7679					   u32 *dump_buf,
7680					   u32 num_dumped_dwords,
7681					   char *results_buf)
7682{
7683	u32 parsed_buf_size;
7684
7685	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7686}
7687
7688enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7689						  u32 *dump_buf,
7690						  u32 num_dumped_dwords,
7691						  u32 *results_buf_size)
7692{
7693	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7694}
7695
7696enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7697					   u32 *dump_buf,
7698					   u32 num_dumped_dwords,
7699					   char *results_buf)
7700{
7701	u32 parsed_buf_size;
7702
7703	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7704}
7705
7706enum dbg_status
7707qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7708					     u32 *dump_buf,
7709					     u32 num_dumped_dwords,
7710					     u32 *results_buf_size)
7711{
7712	return qed_parse_protection_override_dump(dump_buf,
7713						  NULL, results_buf_size);
7714}
7715
7716enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7717						      u32 *dump_buf,
7718						      u32 num_dumped_dwords,
7719						      char *results_buf)
7720{
7721	u32 parsed_buf_size;
7722
7723	return qed_parse_protection_override_dump(dump_buf,
7724						  results_buf,
7725						  &parsed_buf_size);
7726}
7727
7728enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7729						    u32 *dump_buf,
7730						    u32 num_dumped_dwords,
7731						    u32 *results_buf_size)
7732{
7733	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7734}
7735
7736enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7737					     u32 *dump_buf,
7738					     u32 num_dumped_dwords,
7739					     char *results_buf)
7740{
7741	u32 parsed_buf_size;
7742
7743	return qed_parse_fw_asserts_dump(dump_buf,
7744					 results_buf, &parsed_buf_size);
7745}
7746
7747enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7748				   struct dbg_attn_block_result *results)
7749{
7750	const u32 *block_attn_name_offsets;
7751	const char *attn_name_base;
7752	const char *block_name;
7753	enum dbg_attn_type attn_type;
7754	u8 num_regs, i, j;
7755
7756	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7757	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7758	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
7759	if (!block_name)
7760		return DBG_STATUS_INVALID_ARGS;
7761
7762	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7763	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7764	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7765		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7766
7767	block_attn_name_offsets =
7768	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
7769	    results->names_offset;
7770
7771	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
7772
7773	/* Go over registers with a non-zero attention status */
7774	for (i = 0; i < num_regs; i++) {
7775		struct dbg_attn_bit_mapping *bit_mapping;
7776		struct dbg_attn_reg_result *reg_result;
7777		u8 num_reg_attn, bit_idx = 0;
7778
7779		reg_result = &results->reg_results[i];
7780		num_reg_attn = GET_FIELD(reg_result->data,
7781					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7782		bit_mapping = (struct dbg_attn_bit_mapping *)
7783		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
7784		    reg_result->block_attn_offset;
7785
7786		/* Go over attention status bits */
7787		for (j = 0; j < num_reg_attn; j++) {
7788			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7789						     DBG_ATTN_BIT_MAPPING_VAL);
7790			const char *attn_name, *attn_type_str, *masked_str;
7791			u32 attn_name_offset;
7792			u32 sts_addr;
7793
7794			/* Check if the bit index should be advanced (due to
7795			 * unused bits).
7796			 */
7797			if (GET_FIELD(bit_mapping[j].data,
7798				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7799				bit_idx += (u8)attn_idx_val;
7800				continue;
7801			}
7802
7803			/* Check current bit index */
7804			if (reg_result->sts_val & BIT(bit_idx)) {
7805				/* An attention bit with value=1 was found
7806				 * Find attention name
7807				 */
7808				attn_name_offset =
7809					block_attn_name_offsets[attn_idx_val];
7810				attn_name = attn_name_base + attn_name_offset;
7811				attn_type_str =
7812					(attn_type ==
7813					 ATTN_TYPE_INTERRUPT ? "Interrupt" :
7814					 "Parity");
7815				masked_str = reg_result->mask_val &
7816					     BIT(bit_idx) ?
7817					     " [masked]" : "";
7818				sts_addr =
7819				GET_FIELD(reg_result->data,
7820					  DBG_ATTN_REG_RESULT_STS_ADDRESS);
7821				DP_NOTICE(p_hwfn,
7822					  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7823					  block_name, attn_type_str, attn_name,
7824					  sts_addr * 4, bit_idx, masked_str);
7825			}
7826
7827			bit_idx++;
7828		}
7829	}
7830
7831	return DBG_STATUS_OK;
7832}
7833
7834/* Wrapper for unifying the idle_chk and mcp_trace api */
7835static enum dbg_status
7836qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7837				   u32 *dump_buf,
7838				   u32 num_dumped_dwords,
7839				   char *results_buf)
7840{
7841	u32 num_errors, num_warnings;
7842
7843	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7844					  results_buf, &num_errors,
7845					  &num_warnings);
7846}
7847
7848static DEFINE_MUTEX(qed_dbg_lock);
7849
7850#define MAX_PHY_RESULT_BUFFER 9000
7851
7852/******************************** Feature Meta data section ******************/
7853
7854#define GRC_NUM_STR_FUNCS 2
7855#define IDLE_CHK_NUM_STR_FUNCS 1
7856#define MCP_TRACE_NUM_STR_FUNCS 1
7857#define REG_FIFO_NUM_STR_FUNCS 1
7858#define IGU_FIFO_NUM_STR_FUNCS 1
7859#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
7860#define FW_ASSERTS_NUM_STR_FUNCS 1
7861#define ILT_NUM_STR_FUNCS 1
7862#define PHY_NUM_STR_FUNCS 20
7863
7864/* Feature meta data lookup table */
7865static struct {
7866	char *name;
7867	u32 num_funcs;
7868	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7869				    struct qed_ptt *p_ptt, u32 *size);
7870	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7871					struct qed_ptt *p_ptt, u32 *dump_buf,
7872					u32 buf_size, u32 *dumped_dwords);
7873	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7874					 u32 *dump_buf, u32 num_dumped_dwords,
7875					 char *results_buf);
7876	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7877					    u32 *dump_buf,
7878					    u32 num_dumped_dwords,
7879					    u32 *results_buf_size);
7880	const struct qed_func_lookup *hsi_func_lookup;
7881} qed_features_lookup[] = {
7882	{
7883	"grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
7884		    qed_dbg_grc_dump, NULL, NULL, NULL}, {
7885	"idle_chk", IDLE_CHK_NUM_STR_FUNCS,
7886		    qed_dbg_idle_chk_get_dump_buf_size,
7887		    qed_dbg_idle_chk_dump,
7888		    qed_print_idle_chk_results_wrapper,
7889		    qed_get_idle_chk_results_buf_size,
7890		    NULL}, {
7891	"mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
7892		    qed_dbg_mcp_trace_get_dump_buf_size,
7893		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7894		    qed_get_mcp_trace_results_buf_size,
7895		    NULL}, {
7896	"reg_fifo", REG_FIFO_NUM_STR_FUNCS,
7897		    qed_dbg_reg_fifo_get_dump_buf_size,
7898		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7899		    qed_get_reg_fifo_results_buf_size,
7900		    NULL}, {
7901	"igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
7902		    qed_dbg_igu_fifo_get_dump_buf_size,
7903		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7904		    qed_get_igu_fifo_results_buf_size,
7905		    NULL}, {
7906	"protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
7907		    qed_dbg_protection_override_get_dump_buf_size,
7908		    qed_dbg_protection_override_dump,
7909		    qed_print_protection_override_results,
7910		    qed_get_protection_override_results_buf_size,
7911		    NULL}, {
7912	"fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
7913		    qed_dbg_fw_asserts_get_dump_buf_size,
7914		    qed_dbg_fw_asserts_dump,
7915		    qed_print_fw_asserts_results,
7916		    qed_get_fw_asserts_results_buf_size,
7917		    NULL}, {
7918	"ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
7919		    qed_dbg_ilt_dump, NULL, NULL, NULL},};
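
/* Illustrative sketch (not compiled): how a qed_features_lookup[] entry is
 * consumed by qed_dbg_dump() and format_feature() below. Names follow the
 * table fields; allocation and error handling are omitted.
 *
 *	entry = &qed_features_lookup[feature_idx];
 *	entry->get_size(p_hwfn, p_ptt, &size_dwords);
 *	entry->perform_dump(p_hwfn, p_ptt, dump_buf, size_dwords, &dumped);
 *	if (entry->results_buf_size) {	(feature supports formatting)
 *		entry->results_buf_size(p_hwfn, dump_buf, dumped, &txt_bytes);
 *		entry->print_results(p_hwfn, dump_buf, dumped, text_buf);
 *	}
 */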
7920
7921static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7922{
7923	u32 i, precision = 80;
7924
7925	if (!p_text_buf)
7926		return;
7927
7928	pr_notice("\n%.*s", precision, p_text_buf);
7929	for (i = precision; i < text_size; i += precision)
7930		pr_cont("%.*s", precision, p_text_buf + i);
7931	pr_cont("\n");
7932}
7933
7934#define QED_RESULTS_BUF_MIN_SIZE 16
7935/* Generic function for decoding debug feature info */
7936static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7937				      enum qed_dbg_features feature_idx)
7938{
7939	struct qed_dbg_feature *feature =
7940	    &p_hwfn->cdev->dbg_features[feature_idx];
7941	u32 txt_size_bytes, null_char_pos, i;
7942	u32 *dbuf, dwords;
7943	enum dbg_status rc;
7944	char *text_buf;
7945
7946	/* Check if feature supports formatting capability */
7947	if (!qed_features_lookup[feature_idx].results_buf_size)
7948		return DBG_STATUS_OK;
7949
7950	dbuf = (u32 *)feature->dump_buf;
7951	dwords = feature->dumped_dwords;
7952
7953	/* Obtain size of formatted output */
7954	rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
7955							       dbuf,
7956							       dwords,
7957							       &txt_size_bytes);
7958	if (rc != DBG_STATUS_OK)
7959		return rc;
7960
7961	/* Make sure that the allocated size is a multiple of dword
7962	 * (4 bytes).
7963	 */
7964	null_char_pos = txt_size_bytes - 1;
7965	txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
7966
7967	if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7968		DP_NOTICE(p_hwfn->cdev,
7969			  "formatted size of feature was too small %d. Aborting\n",
7970			  txt_size_bytes);
7971		return DBG_STATUS_INVALID_ARGS;
7972	}
7973
7974	/* allocate temp text buf */
7975	text_buf = vzalloc(txt_size_bytes);
7976	if (!text_buf) {
7977		DP_NOTICE(p_hwfn->cdev,
7978			  "failed to allocate text buffer. Aborting\n");
7979		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7980	}
7981
7982	/* Decode feature opcodes to string on temp buf */
7983	rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
7984							    dbuf,
7985							    dwords,
7986							    text_buf);
7987	if (rc != DBG_STATUS_OK) {
7988		vfree(text_buf);
7989		return rc;
7990	}
7991
7992	/* Replace the original null character with a '\n' character.
7993	 * The bytes that were added as a result of the dword alignment are also
7994	 * padded with '\n' characters.
7995	 */
7996	for (i = null_char_pos; i < txt_size_bytes; i++)
7997		text_buf[i] = '\n';
7998
7999	/* Dump printable feature to log */
8000	if (p_hwfn->cdev->print_dbg_data)
8001		qed_dbg_print_feature(text_buf, txt_size_bytes);
8002
8003	/* Dump binary data as is to the output file */
8004	if (p_hwfn->cdev->dbg_bin_dump) {
8005		vfree(text_buf);
8006		return rc;
8007	}
8008
8009	/* Free the old dump_buf and point the dump_buf to the newly allocated
8010	 * and formatted text buffer.
8011	 */
8012	vfree(feature->dump_buf);
8013	feature->dump_buf = text_buf;
8014	feature->buf_size = txt_size_bytes;
8015	feature->dumped_dwords = txt_size_bytes / 4;
8016
8017	return rc;
8018}
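
/* Worked example for the alignment in format_feature() above (illustrative):
 * if the formatted text needs 123 bytes including the null terminator, then
 * null_char_pos = 122 and txt_size_bytes is rounded up to 124, so bytes
 * 122..123 are overwritten with '\n' before the text buffer replaces
 * feature->dump_buf.
 */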
8019
8020#define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF
8021
8022/* Generic function for performing the dump of a debug feature. */
8023static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
8024				    struct qed_ptt *p_ptt,
8025				    enum qed_dbg_features feature_idx)
8026{
8027	struct qed_dbg_feature *feature =
8028	    &p_hwfn->cdev->dbg_features[feature_idx];
8029	u32 buf_size_dwords, *dbuf, *dwords;
8030	enum dbg_status rc;
8031
8032	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
8033		  qed_features_lookup[feature_idx].name);
8034
8035	/* If dump_buf was already allocated, free it (this can happen if a dump
8036	 * was requested but the file was never read).
8037	 * We can't reuse the buffer as is, since its size may have changed.
8038	 */
8039	if (feature->dump_buf) {
8040		vfree(feature->dump_buf);
8041		feature->dump_buf = NULL;
8042	}
8043
8044	/* Get buffer size from hsi, allocate accordingly, and perform the
8045	 * dump.
8046	 */
8047	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
8048						       &buf_size_dwords);
8049	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
8050		return rc;
8051
8052	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
8053		feature->buf_size = 0;
8054		DP_NOTICE(p_hwfn->cdev,
8055			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
8056			  qed_features_lookup[feature_idx].name,
8057			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
8058
8059		return DBG_STATUS_OK;
8060	}
8061
8062	feature->buf_size = buf_size_dwords * sizeof(u32);
8063	feature->dump_buf = vmalloc(feature->buf_size);
8064	if (!feature->dump_buf)
8065		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
8066
8067	dbuf = (u32 *)feature->dump_buf;
8068	dwords = &feature->dumped_dwords;
8069	rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
8070							   dbuf,
8071							   feature->buf_size /
8072							   sizeof(u32),
8073							   dwords);
8074
8075	/* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
8076	 * In this case the buffer holds valid binary data, but we won't be able
8077	 * to parse it (since parsing relies on data in NVRAM, which is only
8078	 * accessible when the MFW is responsive). Skip the formatting but return
8079	 * success so that the binary data is provided.
8080	 */
8081	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
8082		return DBG_STATUS_OK;
8083
8084	if (rc != DBG_STATUS_OK)
8085		return rc;
8086
8087	/* Format output */
8088	rc = format_feature(p_hwfn, feature_idx);
8089	return rc;
8090}
8091
8092int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8093{
8094	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
8095}
8096
8097int qed_dbg_grc_size(struct qed_dev *cdev)
8098{
8099	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
8100}
8101
8102int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8103{
8104	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
8105			       num_dumped_bytes);
8106}
8107
8108int qed_dbg_idle_chk_size(struct qed_dev *cdev)
8109{
8110	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
8111}
8112
8113int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8114{
8115	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
8116			       num_dumped_bytes);
8117}
8118
8119int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
8120{
8121	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
8122}
8123
8124int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8125{
8126	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
8127			       num_dumped_bytes);
8128}
8129
8130int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
8131{
8132	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
8133}
8134
8135static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
8136				    enum qed_nvm_images image_id, u32 *length)
8137{
8138	struct qed_nvm_image_att image_att;
8139	int rc;
8140
8141	*length = 0;
8142	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
8143	if (rc)
8144		return rc;
8145
8146	*length = image_att.length;
8147
8148	return rc;
8149}
8150
8151static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
8152			     u32 *num_dumped_bytes,
8153			     enum qed_nvm_images image_id)
8154{
8155	struct qed_hwfn *p_hwfn =
8156		&cdev->hwfns[cdev->engine_for_debug];
8157	u32 len_rounded;
8158	int rc;
8159
8160	*num_dumped_bytes = 0;
8161	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
8162	if (rc)
8163		return rc;
8164
8165	DP_NOTICE(p_hwfn->cdev,
8166		  "Collecting a debug feature [\"nvram image %d\"]\n",
8167		  image_id);
8168
8169	len_rounded = roundup(len_rounded, sizeof(u32));
8170	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
8171	if (rc)
8172		return rc;
8173
8174	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
8175	if (image_id != QED_NVM_IMAGE_NVM_META)
8176		cpu_to_be32_array((__force __be32 *)buffer,
8177				  (const u32 *)buffer,
8178				  len_rounded / sizeof(u32));
8179
8180	*num_dumped_bytes = len_rounded;
8181
8182	return rc;
8183}
8184
8185int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
8186				u32 *num_dumped_bytes)
8187{
8188	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
8189			       num_dumped_bytes);
8190}
8191
8192int qed_dbg_protection_override_size(struct qed_dev *cdev)
8193{
8194	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
8195}
8196
8197int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
8198		       u32 *num_dumped_bytes)
8199{
8200	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
8201			       num_dumped_bytes);
8202}
8203
8204int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
8205{
8206	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
8207}
8208
8209int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8210{
8211	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
8212}
8213
8214int qed_dbg_ilt_size(struct qed_dev *cdev)
8215{
8216	return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
8217}
8218
8219int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
8220		      u32 *num_dumped_bytes)
8221{
8222	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
8223			       num_dumped_bytes);
8224}
8225
8226int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
8227{
8228	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
8229}
8230
8231/* Defines the number of bytes allocated for recording the length of a
8232 * debugfs feature buffer.
8233 */
8234#define REGDUMP_HEADER_SIZE			sizeof(u32)
8235#define REGDUMP_HEADER_SIZE_SHIFT		0
8236#define REGDUMP_HEADER_SIZE_MASK		0xffffff
8237#define REGDUMP_HEADER_FEATURE_SHIFT		24
8238#define REGDUMP_HEADER_FEATURE_MASK		0x1f
8239#define REGDUMP_HEADER_BIN_DUMP_SHIFT		29
8240#define REGDUMP_HEADER_BIN_DUMP_MASK		0x1
8241#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
8242#define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
8243#define REGDUMP_HEADER_ENGINE_SHIFT		31
8244#define REGDUMP_HEADER_ENGINE_MASK		0x1
8245#define REGDUMP_MAX_SIZE			0x1000000
8246#define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)
8247
8248enum debug_print_features {
8249	OLD_MODE = 0,
8250	IDLE_CHK = 1,
8251	GRC_DUMP = 2,
8252	MCP_TRACE = 3,
8253	REG_FIFO = 4,
8254	PROTECTION_OVERRIDE = 5,
8255	IGU_FIFO = 6,
8256	PHY = 7,
8257	FW_ASSERTS = 8,
8258	NVM_CFG1 = 9,
8259	DEFAULT_CFG = 10,
8260	NVM_META = 11,
8261	MDUMP = 12,
8262	ILT_DUMP = 13,
8263};
8264
8265static u32 qed_calc_regdump_header(struct qed_dev *cdev,
8266				   enum debug_print_features feature,
8267				   int engine, u32 feature_size,
8268				   u8 omit_engine, u8 dbg_bin_dump)
8269{
8270	u32 res = 0;
8271
8272	SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
8273	if (res != feature_size)
8274		DP_NOTICE(cdev,
8275			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
8276			  feature, feature_size);
8277
8278	SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
8279	SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
8280	SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
8281	SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
8282
8283	return res;
8284}
8285
8286int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
8287{
8288	u8 cur_engine, omit_engine = 0, org_engine;
8289	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8290	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
8291	int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
8292	u32 offset = 0, feature_size;
8293
8294	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8295		grc_params[i] = dev_data->grc.param_val[i];
8296
8297	if (!QED_IS_CMT(cdev))
8298		omit_engine = 1;
8299
8300	cdev->dbg_bin_dump = 1;
8301	mutex_lock(&qed_dbg_lock);
8302
8303	org_engine = qed_get_debug_engine(cdev);
8304	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8305		/* Collect idle_chk and GRC dump for each HW function */
8306		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8307			   "obtaining idle_chk and grcdump for current engine\n");
8308		qed_set_debug_engine(cdev, cur_engine);
8309
8310		/* First idle_chk */
8311		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8312				      REGDUMP_HEADER_SIZE, &feature_size);
8313		if (!rc) {
8314			*(u32 *)((u8 *)buffer + offset) =
8315			    qed_calc_regdump_header(cdev, IDLE_CHK,
8316						    cur_engine,
8317						    feature_size,
8318						    omit_engine,
8319						    cdev->dbg_bin_dump);
8320			offset += (feature_size + REGDUMP_HEADER_SIZE);
8321		} else {
8322			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8323		}
8324
8325		/* Second idle_chk */
8326		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8327				      REGDUMP_HEADER_SIZE, &feature_size);
8328		if (!rc) {
8329			*(u32 *)((u8 *)buffer + offset) =
8330			    qed_calc_regdump_header(cdev, IDLE_CHK,
8331						    cur_engine,
8332						    feature_size,
8333						    omit_engine,
8334						    cdev->dbg_bin_dump);
8335			offset += (feature_size + REGDUMP_HEADER_SIZE);
8336		} else {
8337			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8338		}
8339
8340		/* reg_fifo dump */
8341		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8342				      REGDUMP_HEADER_SIZE, &feature_size);
8343		if (!rc) {
8344			*(u32 *)((u8 *)buffer + offset) =
8345			    qed_calc_regdump_header(cdev, REG_FIFO,
8346						    cur_engine,
8347						    feature_size,
8348						    omit_engine,
8349						    cdev->dbg_bin_dump);
8350			offset += (feature_size + REGDUMP_HEADER_SIZE);
8351		} else {
8352			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8353		}
8354
8355		/* igu_fifo dump */
8356		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8357				      REGDUMP_HEADER_SIZE, &feature_size);
8358		if (!rc) {
8359			*(u32 *)((u8 *)buffer + offset) =
8360			    qed_calc_regdump_header(cdev, IGU_FIFO,
8361						    cur_engine,
8362						    feature_size,
8363						    omit_engine,
8364						    cdev->dbg_bin_dump);
8365			offset += (feature_size + REGDUMP_HEADER_SIZE);
8366		} else {
8367			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
8368		}
8369
8370		/* protection_override dump */
8371		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8372						 REGDUMP_HEADER_SIZE,
8373						 &feature_size);
8374		if (!rc) {
8375			*(u32 *)((u8 *)buffer + offset) =
8376			    qed_calc_regdump_header(cdev,
8377						    PROTECTION_OVERRIDE,
8378						    cur_engine,
8379						    feature_size,
8380						    omit_engine,
8381						    cdev->dbg_bin_dump);
8382			offset += (feature_size + REGDUMP_HEADER_SIZE);
8383		} else {
8384			DP_ERR(cdev,
8385			       "qed_dbg_protection_override failed. rc = %d\n",
8386			       rc);
8387		}
8388
8389		/* fw_asserts dump */
8390		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8391					REGDUMP_HEADER_SIZE, &feature_size);
8392		if (!rc) {
8393			*(u32 *)((u8 *)buffer + offset) =
8394			    qed_calc_regdump_header(cdev, FW_ASSERTS,
8395						    cur_engine,
8396						    feature_size,
8397						    omit_engine,
8398						    cdev->dbg_bin_dump);
8399			offset += (feature_size + REGDUMP_HEADER_SIZE);
8400		} else {
8401			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8402			       rc);
8403		}
8404
8405		feature_size = qed_dbg_ilt_size(cdev);
8406		if (!cdev->disable_ilt_dump && feature_size <
8407		    ILT_DUMP_MAX_SIZE) {
8408			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
8409					 REGDUMP_HEADER_SIZE, &feature_size);
8410			if (!rc) {
8411				*(u32 *)((u8 *)buffer + offset) =
8412				    qed_calc_regdump_header(cdev, ILT_DUMP,
8413							    cur_engine,
8414							    feature_size,
8415							    omit_engine,
8416							    cdev->dbg_bin_dump);
8417				offset += (feature_size + REGDUMP_HEADER_SIZE);
8418			} else {
8419				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
8420				       rc);
8421			}
8422		}
8423
8424		/* GRC dump - must be last, because when the MCP is stuck it
8425		 * would clutter the idle_chk, reg_fifo, ... dumps.
8426		 */
8427		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8428			dev_data->grc.param_val[i] = grc_params[i];
8429
8429
8430		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8431				 REGDUMP_HEADER_SIZE, &feature_size);
8432		if (!rc) {
8433			*(u32 *)((u8 *)buffer + offset) =
8434			    qed_calc_regdump_header(cdev, GRC_DUMP,
8435						    cur_engine,
8436						    feature_size,
8437						    omit_engine,
8438						    cdev->dbg_bin_dump);
8439			offset += (feature_size + REGDUMP_HEADER_SIZE);
8440		} else {
8441			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
8442		}
8443	}
8444
8445	qed_set_debug_engine(cdev, org_engine);
8446
8447	/* mcp_trace */
8448	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8449			       REGDUMP_HEADER_SIZE, &feature_size);
8450	if (!rc) {
8451		*(u32 *)((u8 *)buffer + offset) =
8452		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
8453					    feature_size, omit_engine,
8454					    cdev->dbg_bin_dump);
8455		offset += (feature_size + REGDUMP_HEADER_SIZE);
8456	} else {
8457		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8458	}
8459
8460	/* nvm cfg1 */
8461	rc = qed_dbg_nvm_image(cdev,
8462			       (u8 *)buffer + offset +
8463			       REGDUMP_HEADER_SIZE, &feature_size,
8464			       QED_NVM_IMAGE_NVM_CFG1);
8465	if (!rc) {
8466		*(u32 *)((u8 *)buffer + offset) =
8467		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
8468					    feature_size, omit_engine,
8469					    cdev->dbg_bin_dump);
8470		offset += (feature_size + REGDUMP_HEADER_SIZE);
8471	} else if (rc != -ENOENT) {
8472		DP_ERR(cdev,
8473		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8474		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
8475		       rc);
8476	}
8477
8478	/* nvm default */
8479	rc = qed_dbg_nvm_image(cdev,
8480			       (u8 *)buffer + offset +
8481			       REGDUMP_HEADER_SIZE, &feature_size,
8482			       QED_NVM_IMAGE_DEFAULT_CFG);
8483	if (!rc) {
8484		*(u32 *)((u8 *)buffer + offset) =
8485		    qed_calc_regdump_header(cdev, DEFAULT_CFG,
8486					    cur_engine, feature_size,
8487					    omit_engine,
8488					    cdev->dbg_bin_dump);
8489		offset += (feature_size + REGDUMP_HEADER_SIZE);
8490	} else if (rc != -ENOENT) {
8491		DP_ERR(cdev,
8492		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8493		       QED_NVM_IMAGE_DEFAULT_CFG,
8494		       "QED_NVM_IMAGE_DEFAULT_CFG", rc);
8495	}
8496
8497	/* nvm meta */
8498	rc = qed_dbg_nvm_image(cdev,
8499			       (u8 *)buffer + offset +
8500			       REGDUMP_HEADER_SIZE, &feature_size,
8501			       QED_NVM_IMAGE_NVM_META);
8502	if (!rc) {
8503		*(u32 *)((u8 *)buffer + offset) =
8504		    qed_calc_regdump_header(cdev, NVM_META, cur_engine,
8505					    feature_size, omit_engine,
8506					    cdev->dbg_bin_dump);
8507		offset += (feature_size + REGDUMP_HEADER_SIZE);
8508	} else if (rc != -ENOENT) {
8509		DP_ERR(cdev,
8510		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8511		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
8512		       rc);
8513	}
8514
8515	/* nvm mdump */
8516	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
8517			       REGDUMP_HEADER_SIZE, &feature_size,
8518			       QED_NVM_IMAGE_MDUMP);
8519	if (!rc) {
8520		*(u32 *)((u8 *)buffer + offset) =
8521		    qed_calc_regdump_header(cdev, MDUMP, cur_engine,
8522					    feature_size, omit_engine,
8523					    cdev->dbg_bin_dump);
8524		offset += (feature_size + REGDUMP_HEADER_SIZE);
8525	} else if (rc != -ENOENT) {
8526		DP_ERR(cdev,
8527		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8528		       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
8529	}
8530
8531	mutex_unlock(&qed_dbg_lock);
8532	cdev->dbg_bin_dump = 0;
8533
8534	return 0;
8535}
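
/* Illustrative sketch (not compiled): walking the buffer produced by
 * qed_dbg_all_data(). Every feature is prefixed by a header dword built by
 * qed_calc_regdump_header(); the size field is in bytes. total_len and
 * handle_feature() are hypothetical names used only for illustration.
 *
 *	u32 pos = 0;
 *
 *	while (pos < total_len) {
 *		u32 hdr = *(u32 *)((u8 *)buffer + pos);
 *		u32 size = GET_FIELD(hdr, REGDUMP_HEADER_SIZE);
 *
 *		handle_feature(GET_FIELD(hdr, REGDUMP_HEADER_FEATURE),
 *			       (u8 *)buffer + pos + REGDUMP_HEADER_SIZE, size);
 *		pos += REGDUMP_HEADER_SIZE + size;
 *	}
 */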
8536
8537int qed_dbg_all_data_size(struct qed_dev *cdev)
8538{
8539	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8540	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8541	u8 cur_engine, org_engine;
8542
8543	cdev->disable_ilt_dump = false;
8544	org_engine = qed_get_debug_engine(cdev);
8545	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8546		/* Engine specific */
8547		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8548			   "calculating idle_chk and grcdump register length for current engine\n");
8549		qed_set_debug_engine(cdev, cur_engine);
8550		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8551		    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8552		    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8553		    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8554		    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8555		    REGDUMP_HEADER_SIZE +
8556		    qed_dbg_protection_override_size(cdev) +
8557		    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8558		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8559		if (ilt_len < ILT_DUMP_MAX_SIZE) {
8560			total_ilt_len += ilt_len;
8561			regs_len += ilt_len;
8562		}
8563	}
8564
8565	qed_set_debug_engine(cdev, org_engine);
8566
8567	/* Engine common */
8568	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
8569	    REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
8570	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8571	if (image_len)
8572		regs_len += REGDUMP_HEADER_SIZE + image_len;
8573	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8574	if (image_len)
8575		regs_len += REGDUMP_HEADER_SIZE + image_len;
8576	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8577	if (image_len)
8578		regs_len += REGDUMP_HEADER_SIZE + image_len;
8579	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8580	if (image_len)
8581		regs_len += REGDUMP_HEADER_SIZE + image_len;
8582
8583	if (regs_len > REGDUMP_MAX_SIZE) {
8584		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8585			   "Dump exceeds max size 0x%x, disable ILT dump\n",
8586			   REGDUMP_MAX_SIZE);
8587		cdev->disable_ilt_dump = true;
8588		regs_len -= total_ilt_len;
8589	}
8590
8591	return regs_len;
8592}
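
/* Illustrative sketch (not compiled): typical pairing of the two calls above
 * when collecting a full debug dump. Error handling and the consumer of the
 * buffer are omitted; "dump_len" and "buf" are local names used only for
 * illustration.
 *
 *	int dump_len = qed_dbg_all_data_size(cdev);
 *	void *buf = vzalloc(dump_len);
 *
 *	if (buf && !qed_dbg_all_data(cdev, buf))
 *		...			(hand the buffer to its consumer)
 *	vfree(buf);
 */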
8593
8594int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8595		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8596{
8597	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8598	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8599	enum dbg_status dbg_rc;
8600	struct qed_ptt *p_ptt;
8601	int rc = 0;
8602
8603	/* Acquire ptt */
8604	p_ptt = qed_ptt_acquire(p_hwfn);
8605	if (!p_ptt)
8606		return -EINVAL;
8607
8608	/* Get dump */
8609	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8610	if (dbg_rc != DBG_STATUS_OK) {
8611		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8612			   qed_dbg_get_status_str(dbg_rc));
8613		*num_dumped_bytes = 0;
8614		rc = -EINVAL;
8615		goto out;
8616	}
8617
8618	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8619		   "copying debugfs feature to external buffer\n");
8620	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8621	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
8622			    4;
8623
8624out:
8625	qed_ptt_release(p_hwfn, p_ptt);
8626	return rc;
8627}
8628
8629int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8630{
8631	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8632	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8633	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8634	u32 buf_size_dwords;
8635	enum dbg_status rc;
8636
8637	if (!p_ptt)
8638		return -EINVAL;
8639
8640	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8641						   &buf_size_dwords);
8642	if (rc != DBG_STATUS_OK)
8643		buf_size_dwords = 0;
8644
8645	/* Feature will not be dumped if it exceeds maximum size */
8646	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8647		buf_size_dwords = 0;
8648
8649	qed_ptt_release(p_hwfn, p_ptt);
8650	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8651	return qed_feature->buf_size;
8652}
8653
8654int qed_dbg_phy_size(struct qed_dev *cdev)
8655{
8656	/* Return the max size of the PHY info, plus the max size of the PHY
8657	 * mac_stat multiplied by the number of ports.
8658	 */
8659	return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
8660}
8661
8662u8 qed_get_debug_engine(struct qed_dev *cdev)
8663{
8664	return cdev->engine_for_debug;
8665}
8666
8667void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8668{
8669	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8670		   engine_number);
8671	cdev->engine_for_debug = engine_number;
8672}
8673
8674void qed_dbg_pf_init(struct qed_dev *cdev)
8675{
8676	const u8 *dbg_values = NULL;
8677	int i;
8678
8679	/* Sync ver with debugbus qed code */
8680	qed_dbg_set_app_ver(TOOLS_VERSION);
8681
8682	/* Debug values are after init values.
8683	 * The offset is the first dword of the file.
8684	 */
8685	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8686
8687	for_each_hwfn(cdev, i) {
8688		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8689		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8690	}
8691
8692	/* Set the hwfn to be 0 as default */
8693	cdev->engine_for_debug = 0;
8694}
8695
8696void qed_dbg_pf_exit(struct qed_dev *cdev)
8697{
8698	struct qed_dbg_feature *feature = NULL;
8699	enum qed_dbg_features feature_idx;
8700
8701	/* A debug feature's buffer may still be allocated if the feature was
8702	 * used but its dump was never read; free any such buffers here.
8703	 */
8704	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8705		feature = &cdev->dbg_features[feature_idx];
8706		if (feature->dump_buf) {
8707			vfree(feature->dump_buf);
8708			feature->dump_buf = NULL;
8709		}
8710	}
8711}
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015 QLogic Corporation
 
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/vmalloc.h>
   8#include <linux/crc32.h>
   9#include "qed.h"
 
  10#include "qed_hsi.h"
 
  11#include "qed_hw.h"
  12#include "qed_mcp.h"
  13#include "qed_reg_addr.h"
  14
  15/* Memory groups enum */
  16enum mem_groups {
  17	MEM_GROUP_PXP_MEM,
  18	MEM_GROUP_DMAE_MEM,
  19	MEM_GROUP_CM_MEM,
  20	MEM_GROUP_QM_MEM,
  21	MEM_GROUP_DORQ_MEM,
  22	MEM_GROUP_BRB_RAM,
  23	MEM_GROUP_BRB_MEM,
  24	MEM_GROUP_PRS_MEM,
 
 
  25	MEM_GROUP_IOR,
 
  26	MEM_GROUP_BTB_RAM,
 
 
 
  27	MEM_GROUP_CONN_CFC_MEM,
  28	MEM_GROUP_TASK_CFC_MEM,
  29	MEM_GROUP_CAU_PI,
  30	MEM_GROUP_CAU_MEM,
 
  31	MEM_GROUP_PXP_ILT,
  32	MEM_GROUP_TM_MEM,
  33	MEM_GROUP_SDM_MEM,
  34	MEM_GROUP_PBUF,
  35	MEM_GROUP_RAM,
  36	MEM_GROUP_MULD_MEM,
  37	MEM_GROUP_BTB_MEM,
  38	MEM_GROUP_RDIF_CTX,
  39	MEM_GROUP_TDIF_CTX,
  40	MEM_GROUP_CFC_MEM,
  41	MEM_GROUP_IGU_MEM,
  42	MEM_GROUP_IGU_MSIX,
  43	MEM_GROUP_CAU_SB,
  44	MEM_GROUP_BMB_RAM,
  45	MEM_GROUP_BMB_MEM,
 
 
  46	MEM_GROUPS_NUM
  47};
  48
  49/* Memory groups names */
  50static const char * const s_mem_group_names[] = {
  51	"PXP_MEM",
  52	"DMAE_MEM",
  53	"CM_MEM",
  54	"QM_MEM",
  55	"DORQ_MEM",
  56	"BRB_RAM",
  57	"BRB_MEM",
  58	"PRS_MEM",
 
 
  59	"IOR",
 
  60	"BTB_RAM",
 
 
 
  61	"CONN_CFC_MEM",
  62	"TASK_CFC_MEM",
  63	"CAU_PI",
  64	"CAU_MEM",
 
  65	"PXP_ILT",
  66	"TM_MEM",
  67	"SDM_MEM",
  68	"PBUF",
  69	"RAM",
  70	"MULD_MEM",
  71	"BTB_MEM",
  72	"RDIF_CTX",
  73	"TDIF_CTX",
  74	"CFC_MEM",
  75	"IGU_MEM",
  76	"IGU_MSIX",
  77	"CAU_SB",
  78	"BMB_RAM",
  79	"BMB_MEM",
 
 
  80};
  81
  82/* Idle check conditions */
  83
  84static u32 cond5(const u32 *r, const u32 *imm)
  85{
  86	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
  87}
  88
  89static u32 cond7(const u32 *r, const u32 *imm)
  90{
  91	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
  92}
  93
  94static u32 cond6(const u32 *r, const u32 *imm)
  95{
  96	return (r[0] & imm[0]) != imm[1];
  97}
  98
  99static u32 cond9(const u32 *r, const u32 *imm)
 100{
 101	return ((r[0] & imm[0]) >> imm[1]) !=
 102	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
 103}
 104
 105static u32 cond10(const u32 *r, const u32 *imm)
 106{
 107	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
 108}
 109
 110static u32 cond4(const u32 *r, const u32 *imm)
 111{
 112	return (r[0] & ~imm[0]) != imm[1];
 113}
 114
 115static u32 cond0(const u32 *r, const u32 *imm)
 116{
 117	return (r[0] & ~r[1]) != imm[0];
 118}
 119
 120static u32 cond1(const u32 *r, const u32 *imm)
 121{
 122	return r[0] != imm[0];
 123}
 124
 125static u32 cond11(const u32 *r, const u32 *imm)
 126{
 127	return r[0] != r[1] && r[2] == imm[0];
 128}
 129
 130static u32 cond12(const u32 *r, const u32 *imm)
 131{
 132	return r[0] != r[1] && r[2] > imm[0];
 133}
 134
 135static u32 cond3(const u32 *r, const u32 *imm)
 136{
 137	return r[0] != r[1];
 138}
 139
 140static u32 cond13(const u32 *r, const u32 *imm)
 141{
 142	return r[0] & imm[0];
 143}
 144
 145static u32 cond8(const u32 *r, const u32 *imm)
 146{
 147	return r[0] < (r[1] - imm[0]);
 148}
 149
 150static u32 cond2(const u32 *r, const u32 *imm)
 151{
 152	return r[0] > imm[0];
 153}
 154
 155/* Array of Idle Check conditions */
 156static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
 157	cond0,
 158	cond1,
 159	cond2,
 160	cond3,
 161	cond4,
 162	cond5,
 163	cond6,
 164	cond7,
 165	cond8,
 166	cond9,
 167	cond10,
 168	cond11,
 169	cond12,
 170	cond13,
 
 171};
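/* Illustrative note (not part of the original source): each idle-check rule
 * in the debug data carries a condition index, and the dump code evaluates
 * it roughly as
 *
 *	if ((*cond_arr[cond_id])(cond_reg_values, imm_values))
 *		... the rule failed and is added to the idle-check dump ...
 *
 * so, for example, cond1() flags a failure whenever a sampled register
 * value differs from its expected immediate.
 */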
 172
 173/******************************* Data Types **********************************/
 174
 175enum platform_ids {
 176	PLATFORM_ASIC,
 177	PLATFORM_RESERVED,
 178	PLATFORM_RESERVED2,
 179	PLATFORM_RESERVED3,
 180	MAX_PLATFORM_IDS
 181};
 182
 183/* Chip constant definitions */
 184struct chip_defs {
 185	const char *name;
 186};
 187
 188/* Platform constant definitions */
 189struct platform_defs {
 190	const char *name;
 191	u32 delay_factor;
 192	u32 dmae_thresh;
 193	u32 log_thresh;
 194};
 195
 196/* Storm constant definitions.
 197 * Addresses are in bytes, sizes are in quad-regs.
 198 */
 199struct storm_defs {
 200	char letter;
 201	enum block_id block_id;
 202	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
 203	bool has_vfc;
 204	u32 sem_fast_mem_addr;
 205	u32 sem_frame_mode_addr;
 206	u32 sem_slow_enable_addr;
 207	u32 sem_slow_mode_addr;
 208	u32 sem_slow_mode1_conf_addr;
 209	u32 sem_sync_dbg_empty_addr;
 210	u32 sem_slow_dbg_empty_addr;
 211	u32 cm_ctx_wr_addr;
 212	u32 cm_conn_ag_ctx_lid_size;
 213	u32 cm_conn_ag_ctx_rd_addr;
 214	u32 cm_conn_st_ctx_lid_size;
 215	u32 cm_conn_st_ctx_rd_addr;
 216	u32 cm_task_ag_ctx_lid_size;
 217	u32 cm_task_ag_ctx_rd_addr;
 218	u32 cm_task_st_ctx_lid_size;
 219	u32 cm_task_st_ctx_rd_addr;
 220};
 221
 222/* Block constant definitions */
 223struct block_defs {
 224	const char *name;
 225	bool exists[MAX_CHIP_IDS];
 226	bool associated_to_storm;
 227
 228	/* Valid only if associated_to_storm is true */
 229	u32 storm_id;
 230	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
 231	u32 dbg_select_addr;
 232	u32 dbg_enable_addr;
 233	u32 dbg_shift_addr;
 234	u32 dbg_force_valid_addr;
 235	u32 dbg_force_frame_addr;
 236	bool has_reset_bit;
 237
 238	/* If true, block is taken out of reset before dump */
 239	bool unreset;
 240	enum dbg_reset_regs reset_reg;
 241
 242	/* Bit offset in reset register */
 243	u8 reset_bit_offset;
 244};
 245
 246/* Reset register definitions */
 247struct reset_reg_defs {
 248	u32 addr;
 249	bool exists[MAX_CHIP_IDS];
 250	u32 unreset_val[MAX_CHIP_IDS];
 251};
 252
 253struct grc_param_defs {
 254	u32 default_val[MAX_CHIP_IDS];
 255	u32 min;
 256	u32 max;
 257	bool is_preset;
 258	bool is_persistent;
 259	u32 exclude_all_preset_val;
 260	u32 crash_preset_val;
 261};
 262
 263/* Address is in 128b units. Width is in bits. */
 264struct rss_mem_defs {
 265	const char *mem_name;
 266	const char *type_name;
 267	u32 addr;
 268	u32 entry_width;
 269	u32 num_entries[MAX_CHIP_IDS];
 270};
 271
 272struct vfc_ram_defs {
 273	const char *mem_name;
 274	const char *type_name;
 275	u32 base_row;
 276	u32 num_rows;
 277};
 278
 279struct big_ram_defs {
 280	const char *instance_name;
 281	enum mem_groups mem_group_id;
 282	enum mem_groups ram_mem_group_id;
 283	enum dbg_grc_params grc_param;
 284	u32 addr_reg_addr;
 285	u32 data_reg_addr;
 286	u32 is_256b_reg_addr;
 287	u32 is_256b_bit_offset[MAX_CHIP_IDS];
 288	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
 289};
 290
 291struct phy_defs {
 292	const char *phy_name;
 293
 294	/* PHY base GRC address */
 295	u32 base_addr;
 296
 297	/* Relative address of indirect TBUS address register (bits 0..7) */
 298	u32 tbus_addr_lo_addr;
 299
 300	/* Relative address of indirect TBUS address register (bits 8..10) */
 301	u32 tbus_addr_hi_addr;
 302
 303	/* Relative address of indirect TBUS data register (bits 0..7) */
 304	u32 tbus_data_lo_addr;
 305
 306	/* Relative address of indirect TBUS data register (bits 8..11) */
 307	u32 tbus_data_hi_addr;
 308};
 309
 310/* Split type definitions */
 311struct split_type_defs {
 312	const char *name;
 313};
 314
 315/******************************** Constants **********************************/
 316
 317#define MAX_LCIDS			320
 318#define MAX_LTIDS			320
 319
 320#define NUM_IOR_SETS			2
 321#define IORS_PER_SET			176
 322#define IOR_SET_OFFSET(set_id)		((set_id) * 256)
 323
 324#define BYTES_IN_DWORD			sizeof(u32)
 325
 326/* In the macros below, size and offset are specified in bits */
 327#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
 328#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
 329#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
 330#define FIELD_DWORD_OFFSET(type, field) \
 331	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
 332#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
 333#define FIELD_BIT_MASK(type, field) \
 334	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
 335	 FIELD_DWORD_SHIFT(type, field))
 336
 337#define SET_VAR_FIELD(var, type, field, val) \
 338	do { \
 339		var[FIELD_DWORD_OFFSET(type, field)] &=	\
 340		(~FIELD_BIT_MASK(type, field));	\
 341		var[FIELD_DWORD_OFFSET(type, field)] |= \
 342		(val) << FIELD_DWORD_SHIFT(type, field); \
 343	} while (0)
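/* Illustrative expansion (hypothetical call, using the VFC command layout
 * defined further below, where VFC_CAM_CMD_ROW_OFFSET is 48 and
 * VFC_CAM_CMD_ROW_SIZE is 9):
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * clears bits 16..24 of cam_cmd[1] (i.e. bits 48..56 of the 64-bit command)
 * and ORs in the new row value shifted left by 16.
 */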
 344
 345#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
 346	do { \
 347		for (i = 0; i < (arr_size); i++) \
 348			qed_wr(dev, ptt, addr,	(arr)[i]); \
 349	} while (0)
 350
 351#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
 352	do { \
 353		for (i = 0; i < (arr_size); i++) \
 354			(arr)[i] = qed_rd(dev, ptt, addr); \
 355	} while (0)
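/* Note: both macros rely on a loop counter named 'i' that must already be
 * declared in the calling function.  Illustrative use (hypothetical buffer
 * and length names):
 *
 *	ARR_REG_WR(p_hwfn, p_ptt, BRB_REG_BIG_RAM_DATA, buf, len);
 *
 * issues 'len' back-to-back qed_wr() calls to the same data register.
 */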
 356
 357#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
 358#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
 359
 360/* Extra lines include a signature line + optional latency events line */
 361#define NUM_EXTRA_DBG_LINES(block_desc) \
 362	(1 + ((block_desc)->has_latency_events ? 1 : 0))
 363#define NUM_DBG_LINES(block_desc) \
 364	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
 365
 366#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
 367#define RAM_LINES_TO_BYTES(lines) \
 368	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
 369
 370#define REG_DUMP_LEN_SHIFT		24
 371#define MEM_DUMP_ENTRY_SIZE_DWORDS \
 372	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
 373
 374#define IDLE_CHK_RULE_SIZE_DWORDS \
 375	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
 376
 377#define IDLE_CHK_RESULT_HDR_DWORDS \
 378	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
 379
 380#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
 381	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
 382
 383#define IDLE_CHK_MAX_ENTRIES_SIZE	32
 384
 385/* The sizes and offsets below are specified in bits */
 386#define VFC_CAM_CMD_STRUCT_SIZE		64
 387#define VFC_CAM_CMD_ROW_OFFSET		48
 388#define VFC_CAM_CMD_ROW_SIZE		9
 389#define VFC_CAM_ADDR_STRUCT_SIZE	16
 390#define VFC_CAM_ADDR_OP_OFFSET		0
 391#define VFC_CAM_ADDR_OP_SIZE		4
 392#define VFC_CAM_RESP_STRUCT_SIZE	256
 393#define VFC_RAM_ADDR_STRUCT_SIZE	16
 394#define VFC_RAM_ADDR_OP_OFFSET		0
 395#define VFC_RAM_ADDR_OP_SIZE		2
 396#define VFC_RAM_ADDR_ROW_OFFSET		2
 397#define VFC_RAM_ADDR_ROW_SIZE		10
 398#define VFC_RAM_RESP_STRUCT_SIZE	256
 399
 400#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
 401#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
 402#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
 403#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
 404#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
 405#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
 406
 407#define NUM_VFC_RAM_TYPES		4
 408
 409#define VFC_CAM_NUM_ROWS		512
 410
 411#define VFC_OPCODE_CAM_RD		14
 412#define VFC_OPCODE_RAM_RD		0
 413
 414#define NUM_RSS_MEM_TYPES		5
 415
 416#define NUM_BIG_RAM_TYPES		3
 417#define BIG_RAM_NAME_LEN		3
 418
 419#define NUM_PHY_TBUS_ADDRESSES		2048
 420#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
 421
 422#define RESET_REG_UNRESET_OFFSET	4
 423
 424#define STALL_DELAY_MS			500
 425
 426#define STATIC_DEBUG_LINE_DWORDS	9
 427
 428#define NUM_COMMON_GLOBAL_PARAMS	8
 
 
 429
 
 430#define FW_IMG_MAIN			1
 
 431
 432#define REG_FIFO_ELEMENT_DWORDS		2
 433#define REG_FIFO_DEPTH_ELEMENTS		32
 434#define REG_FIFO_DEPTH_DWORDS \
 435	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
 436
 437#define IGU_FIFO_ELEMENT_DWORDS		4
 438#define IGU_FIFO_DEPTH_ELEMENTS		64
 439#define IGU_FIFO_DEPTH_DWORDS \
 440	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
 441
 442#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
 443#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
 444#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
 445	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
 446	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
 447
 448#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
 449	(MCP_REG_SCRATCH + \
 450	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
 451
 
 
 452#define EMPTY_FW_VERSION_STR		"???_???_???_???"
 453#define EMPTY_FW_IMAGE_STR		"???????????????"
 454
 455/***************************** Constant Arrays *******************************/
 456
 457struct dbg_array {
 458	const u32 *ptr;
 459	u32 size_in_dwords;
 460};
 461
 462/* Debug arrays */
 463static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
 464
 465/* Chip constant definitions array */
 466static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 467	{"bb"},
 468	{"ah"},
 469	{"reserved"},
 
 470};
 471
 472/* Storm constant definitions array */
 473static struct storm_defs s_storm_defs[] = {
 474	/* Tstorm */
 475	{'T', BLOCK_TSEM,
 476	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
 477	  DBG_BUS_CLIENT_RBCT}, true,
 478	 TSEM_REG_FAST_MEMORY,
 479	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 480	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
 481	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
 482	 TCM_REG_CTX_RBC_ACCS,
 483	 4, TCM_REG_AGG_CON_CTX,
 484	 16, TCM_REG_SM_CON_CTX,
 485	 2, TCM_REG_AGG_TASK_CTX,
 486	 4, TCM_REG_SM_TASK_CTX},
 487
 488	/* Mstorm */
 489	{'M', BLOCK_MSEM,
 490	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
 491	  DBG_BUS_CLIENT_RBCM}, false,
 492	 MSEM_REG_FAST_MEMORY,
 493	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 494	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
 495	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
 496	 MCM_REG_CTX_RBC_ACCS,
 497	 1, MCM_REG_AGG_CON_CTX,
 498	 10, MCM_REG_SM_CON_CTX,
 499	 2, MCM_REG_AGG_TASK_CTX,
 500	 7, MCM_REG_SM_TASK_CTX},
 
 
 
 501
 502	/* Ustorm */
 503	{'U', BLOCK_USEM,
 504	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
 505	  DBG_BUS_CLIENT_RBCU}, false,
 506	 USEM_REG_FAST_MEMORY,
 507	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 508	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
 509	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
 510	 UCM_REG_CTX_RBC_ACCS,
 511	 2, UCM_REG_AGG_CON_CTX,
 512	 13, UCM_REG_SM_CON_CTX,
 513	 3, UCM_REG_AGG_TASK_CTX,
 514	 3, UCM_REG_SM_TASK_CTX},
 
 
 
 515
 516	/* Xstorm */
 517	{'X', BLOCK_XSEM,
 518	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
 519	  DBG_BUS_CLIENT_RBCX}, false,
 520	 XSEM_REG_FAST_MEMORY,
 521	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 522	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
 523	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
 524	 XCM_REG_CTX_RBC_ACCS,
 525	 9, XCM_REG_AGG_CON_CTX,
 526	 15, XCM_REG_SM_CON_CTX,
 527	 0, 0,
 528	 0, 0},
 
 
 529
 530	/* Ystorm */
 531	{'Y', BLOCK_YSEM,
 532	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
 533	  DBG_BUS_CLIENT_RBCY}, false,
 534	 YSEM_REG_FAST_MEMORY,
 535	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 536	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
 537	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
 538	 YCM_REG_CTX_RBC_ACCS,
 539	 2, YCM_REG_AGG_CON_CTX,
 540	 3, YCM_REG_SM_CON_CTX,
 541	 2, YCM_REG_AGG_TASK_CTX,
 542	 12, YCM_REG_SM_TASK_CTX},
 
 
 
 543
 544	/* Pstorm */
 545	{'P', BLOCK_PSEM,
 546	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
 547	  DBG_BUS_CLIENT_RBCS}, true,
 548	 PSEM_REG_FAST_MEMORY,
 549	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 550	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
 551	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
 552	 PCM_REG_CTX_RBC_ACCS,
 553	 0, 0,
 554	 10, PCM_REG_SM_CON_CTX,
 555	 0, 0,
 556	 0, 0}
 557};
 558
 559/* Block definitions array */
 560
 561static struct block_defs block_grc_defs = {
 562	"grc",
 563	{true, true, true}, false, 0,
 564	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 565	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
 566	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
 567	GRC_REG_DBG_FORCE_FRAME,
 568	true, false, DBG_RESET_REG_MISC_PL_UA, 1
 569};
 570
 571static struct block_defs block_miscs_defs = {
 572	"miscs", {true, true, true}, false, 0,
 573	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 574	0, 0, 0, 0, 0,
 575	false, false, MAX_DBG_RESET_REGS, 0
 576};
 577
 578static struct block_defs block_misc_defs = {
 579	"misc", {true, true, true}, false, 0,
 580	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 581	0, 0, 0, 0, 0,
 582	false, false, MAX_DBG_RESET_REGS, 0
 583};
 584
 585static struct block_defs block_dbu_defs = {
 586	"dbu", {true, true, true}, false, 0,
 587	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 588	0, 0, 0, 0, 0,
 589	false, false, MAX_DBG_RESET_REGS, 0
 590};
 591
 592static struct block_defs block_pglue_b_defs = {
 593	"pglue_b",
 594	{true, true, true}, false, 0,
 595	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
 596	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
 597	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
 598	PGLUE_B_REG_DBG_FORCE_FRAME,
 599	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
 600};
 601
 602static struct block_defs block_cnig_defs = {
 603	"cnig",
 604	{true, true, true}, false, 0,
 605	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
 606	 DBG_BUS_CLIENT_RBCW},
 607	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
 608	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
 609	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
 610	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
 611};
 612
 613static struct block_defs block_cpmu_defs = {
 614	"cpmu", {true, true, true}, false, 0,
 615	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 616	0, 0, 0, 0, 0,
 617	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 618};
 619
 620static struct block_defs block_ncsi_defs = {
 621	"ncsi",
 622	{true, true, true}, false, 0,
 623	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 624	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
 625	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
 626	NCSI_REG_DBG_FORCE_FRAME,
 627	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
 628};
 629
 630static struct block_defs block_opte_defs = {
 631	"opte", {true, true, false}, false, 0,
 632	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 633	0, 0, 0, 0, 0,
 634	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 635};
 636
 637static struct block_defs block_bmb_defs = {
 638	"bmb",
 639	{true, true, true}, false, 0,
 640	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
 641	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
 642	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
 643	BMB_REG_DBG_FORCE_FRAME,
 644	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
 645};
 646
 647static struct block_defs block_pcie_defs = {
 648	"pcie",
 649	{true, true, true}, false, 0,
 650	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
 651	 DBG_BUS_CLIENT_RBCH},
 652	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
 653	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
 654	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
 655	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
 656	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 657	false, false, MAX_DBG_RESET_REGS, 0
 658};
 659
 660static struct block_defs block_mcp_defs = {
 661	"mcp", {true, true, true}, false, 0,
 662	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 663	0, 0, 0, 0, 0,
 664	false, false, MAX_DBG_RESET_REGS, 0
 665};
 666
 667static struct block_defs block_mcp2_defs = {
 668	"mcp2",
 669	{true, true, true}, false, 0,
 670	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 671	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
 672	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
 673	MCP2_REG_DBG_FORCE_FRAME,
 674	false, false, MAX_DBG_RESET_REGS, 0
 675};
 676
 677static struct block_defs block_pswhst_defs = {
 678	"pswhst",
 679	{true, true, true}, false, 0,
 680	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 681	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
 682	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
 683	PSWHST_REG_DBG_FORCE_FRAME,
 684	true, false, DBG_RESET_REG_MISC_PL_HV, 0
 685};
 686
 687static struct block_defs block_pswhst2_defs = {
 688	"pswhst2",
 689	{true, true, true}, false, 0,
 690	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 691	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
 692	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
 693	PSWHST2_REG_DBG_FORCE_FRAME,
 694	true, false, DBG_RESET_REG_MISC_PL_HV, 0
 695};
 696
 697static struct block_defs block_pswrd_defs = {
 698	"pswrd",
 699	{true, true, true}, false, 0,
 700	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 701	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
 702	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
 703	PSWRD_REG_DBG_FORCE_FRAME,
 704	true, false, DBG_RESET_REG_MISC_PL_HV, 2
 705};
 706
 707static struct block_defs block_pswrd2_defs = {
 708	"pswrd2",
 709	{true, true, true}, false, 0,
 710	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 711	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
 712	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
 713	PSWRD2_REG_DBG_FORCE_FRAME,
 714	true, false, DBG_RESET_REG_MISC_PL_HV, 2
 715};
 716
 717static struct block_defs block_pswwr_defs = {
 718	"pswwr",
 719	{true, true, true}, false, 0,
 720	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 721	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
 722	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
 723	PSWWR_REG_DBG_FORCE_FRAME,
 724	true, false, DBG_RESET_REG_MISC_PL_HV, 3
 725};
 726
 727static struct block_defs block_pswwr2_defs = {
 728	"pswwr2", {true, true, true}, false, 0,
 729	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 730	0, 0, 0, 0, 0,
 731	true, false, DBG_RESET_REG_MISC_PL_HV, 3
 732};
 733
 734static struct block_defs block_pswrq_defs = {
 735	"pswrq",
 736	{true, true, true}, false, 0,
 737	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 738	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
 739	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
 740	PSWRQ_REG_DBG_FORCE_FRAME,
 741	true, false, DBG_RESET_REG_MISC_PL_HV, 1
 742};
 743
 744static struct block_defs block_pswrq2_defs = {
 745	"pswrq2",
 746	{true, true, true}, false, 0,
 747	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 748	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
 749	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
 750	PSWRQ2_REG_DBG_FORCE_FRAME,
 751	true, false, DBG_RESET_REG_MISC_PL_HV, 1
 752};
 753
 754static struct block_defs block_pglcs_defs = {
 755	"pglcs",
 756	{true, true, true}, false, 0,
 757	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
 758	 DBG_BUS_CLIENT_RBCH},
 759	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
 760	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
 761	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
 762	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
 763};
 764
 765static struct block_defs block_ptu_defs = {
 766	"ptu",
 767	{true, true, true}, false, 0,
 768	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 769	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
 770	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
 771	PTU_REG_DBG_FORCE_FRAME,
 772	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
 773};
 774
 775static struct block_defs block_dmae_defs = {
 776	"dmae",
 777	{true, true, true}, false, 0,
 778	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 779	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
 780	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
 781	DMAE_REG_DBG_FORCE_FRAME,
 782	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
 783};
 784
 785static struct block_defs block_tcm_defs = {
 786	"tcm",
 787	{true, true, true}, true, DBG_TSTORM_ID,
 788	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 789	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
 790	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
 791	TCM_REG_DBG_FORCE_FRAME,
 792	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
 793};
 794
 795static struct block_defs block_mcm_defs = {
 796	"mcm",
 797	{true, true, true}, true, DBG_MSTORM_ID,
 798	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 799	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
 800	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
 801	MCM_REG_DBG_FORCE_FRAME,
 802	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
 803};
 804
 805static struct block_defs block_ucm_defs = {
 806	"ucm",
 807	{true, true, true}, true, DBG_USTORM_ID,
 808	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 809	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
 810	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
 811	UCM_REG_DBG_FORCE_FRAME,
 812	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
 813};
 814
 815static struct block_defs block_xcm_defs = {
 816	"xcm",
 817	{true, true, true}, true, DBG_XSTORM_ID,
 818	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 819	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
 820	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
 821	XCM_REG_DBG_FORCE_FRAME,
 822	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
 823};
 824
 825static struct block_defs block_ycm_defs = {
 826	"ycm",
 827	{true, true, true}, true, DBG_YSTORM_ID,
 828	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 829	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
 830	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
 831	YCM_REG_DBG_FORCE_FRAME,
 832	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
 833};
 834
 835static struct block_defs block_pcm_defs = {
 836	"pcm",
 837	{true, true, true}, true, DBG_PSTORM_ID,
 838	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 839	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
 840	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
 841	PCM_REG_DBG_FORCE_FRAME,
 842	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
 843};
 844
 845static struct block_defs block_qm_defs = {
 846	"qm",
 847	{true, true, true}, false, 0,
 848	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
 849	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
 850	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
 851	QM_REG_DBG_FORCE_FRAME,
 852	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
 853};
 854
 855static struct block_defs block_tm_defs = {
 856	"tm",
 857	{true, true, true}, false, 0,
 858	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 859	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
 860	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
 861	TM_REG_DBG_FORCE_FRAME,
 862	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
 863};
 864
 865static struct block_defs block_dorq_defs = {
 866	"dorq",
 867	{true, true, true}, false, 0,
 868	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 869	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
 870	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
 871	DORQ_REG_DBG_FORCE_FRAME,
 872	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
 873};
 874
 875static struct block_defs block_brb_defs = {
 876	"brb",
 877	{true, true, true}, false, 0,
 878	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 879	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
 880	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
 881	BRB_REG_DBG_FORCE_FRAME,
 882	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
 883};
 884
 885static struct block_defs block_src_defs = {
 886	"src",
 887	{true, true, true}, false, 0,
 888	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 889	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
 890	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
 891	SRC_REG_DBG_FORCE_FRAME,
 892	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
 893};
 894
 895static struct block_defs block_prs_defs = {
 896	"prs",
 897	{true, true, true}, false, 0,
 898	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 899	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
 900	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
 901	PRS_REG_DBG_FORCE_FRAME,
 902	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
 903};
 904
 905static struct block_defs block_tsdm_defs = {
 906	"tsdm",
 907	{true, true, true}, true, DBG_TSTORM_ID,
 908	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 909	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
 910	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
 911	TSDM_REG_DBG_FORCE_FRAME,
 912	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
 913};
 914
 915static struct block_defs block_msdm_defs = {
 916	"msdm",
 917	{true, true, true}, true, DBG_MSTORM_ID,
 918	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 919	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
 920	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
 921	MSDM_REG_DBG_FORCE_FRAME,
 922	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
 923};
 924
 925static struct block_defs block_usdm_defs = {
 926	"usdm",
 927	{true, true, true}, true, DBG_USTORM_ID,
 928	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 929	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
 930	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
 931	USDM_REG_DBG_FORCE_FRAME,
 932	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
 933};
 934
 935static struct block_defs block_xsdm_defs = {
 936	"xsdm",
 937	{true, true, true}, true, DBG_XSTORM_ID,
 938	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 939	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
 940	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
 941	XSDM_REG_DBG_FORCE_FRAME,
 942	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
 943};
 944
 945static struct block_defs block_ysdm_defs = {
 946	"ysdm",
 947	{true, true, true}, true, DBG_YSTORM_ID,
 948	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 949	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
 950	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
 951	YSDM_REG_DBG_FORCE_FRAME,
 952	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
 953};
 954
 955static struct block_defs block_psdm_defs = {
 956	"psdm",
 957	{true, true, true}, true, DBG_PSTORM_ID,
 958	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 959	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
 960	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
 961	PSDM_REG_DBG_FORCE_FRAME,
 962	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
 963};
 964
 965static struct block_defs block_tsem_defs = {
 966	"tsem",
 967	{true, true, true}, true, DBG_TSTORM_ID,
 968	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 969	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
 970	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
 971	TSEM_REG_DBG_FORCE_FRAME,
 972	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
 973};
 974
 975static struct block_defs block_msem_defs = {
 976	"msem",
 977	{true, true, true}, true, DBG_MSTORM_ID,
 978	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 979	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
 980	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
 981	MSEM_REG_DBG_FORCE_FRAME,
 982	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
 983};
 984
 985static struct block_defs block_usem_defs = {
 986	"usem",
 987	{true, true, true}, true, DBG_USTORM_ID,
 988	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 989	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
 990	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
 991	USEM_REG_DBG_FORCE_FRAME,
 992	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
 993};
 994
 995static struct block_defs block_xsem_defs = {
 996	"xsem",
 997	{true, true, true}, true, DBG_XSTORM_ID,
 998	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 999	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1000	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1001	XSEM_REG_DBG_FORCE_FRAME,
1002	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1003};
1004
1005static struct block_defs block_ysem_defs = {
1006	"ysem",
1007	{true, true, true}, true, DBG_YSTORM_ID,
1008	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1009	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1010	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1011	YSEM_REG_DBG_FORCE_FRAME,
1012	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1013};
1014
1015static struct block_defs block_psem_defs = {
1016	"psem",
1017	{true, true, true}, true, DBG_PSTORM_ID,
1018	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1019	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1020	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1021	PSEM_REG_DBG_FORCE_FRAME,
1022	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1023};
1024
1025static struct block_defs block_rss_defs = {
1026	"rss",
1027	{true, true, true}, false, 0,
1028	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1029	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1030	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1031	RSS_REG_DBG_FORCE_FRAME,
1032	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1033};
1034
1035static struct block_defs block_tmld_defs = {
1036	"tmld",
1037	{true, true, true}, false, 0,
1038	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1039	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1040	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1041	TMLD_REG_DBG_FORCE_FRAME,
1042	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1043};
1044
1045static struct block_defs block_muld_defs = {
1046	"muld",
1047	{true, true, true}, false, 0,
1048	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1049	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1050	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1051	MULD_REG_DBG_FORCE_FRAME,
1052	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1053};
1054
1055static struct block_defs block_yuld_defs = {
1056	"yuld",
1057	{true, true, false}, false, 0,
1058	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1059	 MAX_DBG_BUS_CLIENTS},
1060	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1061	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1062	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1063	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1064	15
1065};
1066
1067static struct block_defs block_xyld_defs = {
1068	"xyld",
1069	{true, true, true}, false, 0,
1070	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1071	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1072	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1073	XYLD_REG_DBG_FORCE_FRAME,
1074	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1075};
1076
1077static struct block_defs block_ptld_defs = {
1078	"ptld",
1079	{false, false, true}, false, 0,
1080	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1081	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1082	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1083	PTLD_REG_DBG_FORCE_FRAME_E5,
1084	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1085	28
1086};
1087
1088static struct block_defs block_ypld_defs = {
1089	"ypld",
1090	{false, false, true}, false, 0,
1091	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1092	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1093	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1094	YPLD_REG_DBG_FORCE_FRAME_E5,
1095	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1096	27
1097};
1098
1099static struct block_defs block_prm_defs = {
1100	"prm",
1101	{true, true, true}, false, 0,
1102	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1103	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1104	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1105	PRM_REG_DBG_FORCE_FRAME,
1106	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1107};
1108
1109static struct block_defs block_pbf_pb1_defs = {
1110	"pbf_pb1",
1111	{true, true, true}, false, 0,
1112	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1113	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1114	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1115	PBF_PB1_REG_DBG_FORCE_FRAME,
1116	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1117	11
1118};
1119
1120static struct block_defs block_pbf_pb2_defs = {
1121	"pbf_pb2",
1122	{true, true, true}, false, 0,
1123	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1124	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1125	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1126	PBF_PB2_REG_DBG_FORCE_FRAME,
1127	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1128	12
1129};
1130
1131static struct block_defs block_rpb_defs = {
1132	"rpb",
1133	{true, true, true}, false, 0,
1134	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1135	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1136	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1137	RPB_REG_DBG_FORCE_FRAME,
1138	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1139};
1140
1141static struct block_defs block_btb_defs = {
1142	"btb",
1143	{true, true, true}, false, 0,
1144	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1145	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1146	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1147	BTB_REG_DBG_FORCE_FRAME,
1148	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1149};
1150
1151static struct block_defs block_pbf_defs = {
1152	"pbf",
1153	{true, true, true}, false, 0,
1154	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1155	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1156	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1157	PBF_REG_DBG_FORCE_FRAME,
1158	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1159};
1160
1161static struct block_defs block_rdif_defs = {
1162	"rdif",
1163	{true, true, true}, false, 0,
1164	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1165	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1166	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1167	RDIF_REG_DBG_FORCE_FRAME,
1168	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1169};
1170
1171static struct block_defs block_tdif_defs = {
1172	"tdif",
1173	{true, true, true}, false, 0,
1174	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1175	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1176	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1177	TDIF_REG_DBG_FORCE_FRAME,
1178	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1179};
1180
1181static struct block_defs block_cdu_defs = {
1182	"cdu",
1183	{true, true, true}, false, 0,
1184	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1185	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1186	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1187	CDU_REG_DBG_FORCE_FRAME,
1188	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1189};
1190
1191static struct block_defs block_ccfc_defs = {
1192	"ccfc",
1193	{true, true, true}, false, 0,
1194	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1195	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1196	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1197	CCFC_REG_DBG_FORCE_FRAME,
1198	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1199};
1200
1201static struct block_defs block_tcfc_defs = {
1202	"tcfc",
1203	{true, true, true}, false, 0,
1204	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1205	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1206	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1207	TCFC_REG_DBG_FORCE_FRAME,
1208	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1209};
1210
1211static struct block_defs block_igu_defs = {
1212	"igu",
1213	{true, true, true}, false, 0,
1214	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1215	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1216	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1217	IGU_REG_DBG_FORCE_FRAME,
1218	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1219};
1220
1221static struct block_defs block_cau_defs = {
1222	"cau",
1223	{true, true, true}, false, 0,
1224	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1225	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1226	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1227	CAU_REG_DBG_FORCE_FRAME,
1228	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1229};
1230
1231static struct block_defs block_rgfs_defs = {
1232	"rgfs", {false, false, true}, false, 0,
1233	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1234	0, 0, 0, 0, 0,
1235	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1236};
1237
1238static struct block_defs block_rgsrc_defs = {
1239	"rgsrc",
1240	{false, false, true}, false, 0,
1241	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1242	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1243	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1244	RGSRC_REG_DBG_FORCE_FRAME_E5,
1245	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1246	30
1247};
1248
1249static struct block_defs block_tgfs_defs = {
1250	"tgfs", {false, false, true}, false, 0,
1251	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1252	0, 0, 0, 0, 0,
1253	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1254};
1255
1256static struct block_defs block_tgsrc_defs = {
1257	"tgsrc",
1258	{false, false, true}, false, 0,
1259	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1260	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1261	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1262	TGSRC_REG_DBG_FORCE_FRAME_E5,
1263	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1264	31
1265};
1266
1267static struct block_defs block_umac_defs = {
1268	"umac",
1269	{true, true, true}, false, 0,
1270	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1271	 DBG_BUS_CLIENT_RBCZ},
1272	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1273	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1274	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1275	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1276};
1277
1278static struct block_defs block_xmac_defs = {
1279	"xmac", {true, false, false}, false, 0,
1280	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1281	0, 0, 0, 0, 0,
1282	false, false, MAX_DBG_RESET_REGS, 0
1283};
1284
1285static struct block_defs block_dbg_defs = {
1286	"dbg", {true, true, true}, false, 0,
1287	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1288	0, 0, 0, 0, 0,
1289	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1290};
1291
1292static struct block_defs block_nig_defs = {
1293	"nig",
1294	{true, true, true}, false, 0,
1295	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1296	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1297	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1298	NIG_REG_DBG_FORCE_FRAME,
1299	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1300};
1301
1302static struct block_defs block_wol_defs = {
1303	"wol",
1304	{false, true, true}, false, 0,
1305	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1306	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1307	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1308	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1309	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1310};
1311
1312static struct block_defs block_bmbn_defs = {
1313	"bmbn",
1314	{false, true, true}, false, 0,
1315	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1316	 DBG_BUS_CLIENT_RBCB},
1317	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1318	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1319	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1320	false, false, MAX_DBG_RESET_REGS, 0
1321};
1322
1323static struct block_defs block_ipc_defs = {
1324	"ipc", {true, true, true}, false, 0,
1325	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1326	0, 0, 0, 0, 0,
1327	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1328};
1329
1330static struct block_defs block_nwm_defs = {
1331	"nwm",
1332	{false, true, true}, false, 0,
1333	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1334	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1335	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1336	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1337	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1338};
1339
1340static struct block_defs block_nws_defs = {
1341	"nws",
1342	{false, true, true}, false, 0,
1343	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1344	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1345	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1346	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1347	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1348};
1349
1350static struct block_defs block_ms_defs = {
1351	"ms",
1352	{false, true, true}, false, 0,
1353	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1354	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1355	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1356	MS_REG_DBG_FORCE_FRAME_K2_E5,
1357	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1358};
1359
1360static struct block_defs block_phy_pcie_defs = {
1361	"phy_pcie",
1362	{false, true, true}, false, 0,
1363	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1364	 DBG_BUS_CLIENT_RBCH},
1365	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1366	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1367	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1368	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1369	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1370	false, false, MAX_DBG_RESET_REGS, 0
1371};
1372
1373static struct block_defs block_led_defs = {
1374	"led", {false, true, true}, false, 0,
1375	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1376	0, 0, 0, 0, 0,
1377	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1378};
1379
1380static struct block_defs block_avs_wrap_defs = {
1381	"avs_wrap", {false, true, false}, false, 0,
1382	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1383	0, 0, 0, 0, 0,
1384	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1385};
1386
1387static struct block_defs block_pxpreqbus_defs = {
1388	"pxpreqbus", {false, false, false}, false, 0,
1389	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1390	0, 0, 0, 0, 0,
1391	false, false, MAX_DBG_RESET_REGS, 0
1392};
1393
1394static struct block_defs block_misc_aeu_defs = {
1395	"misc_aeu", {true, true, true}, false, 0,
1396	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1397	0, 0, 0, 0, 0,
1398	false, false, MAX_DBG_RESET_REGS, 0
1399};
1400
1401static struct block_defs block_bar0_map_defs = {
1402	"bar0_map", {true, true, true}, false, 0,
1403	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1404	0, 0, 0, 0, 0,
1405	false, false, MAX_DBG_RESET_REGS, 0
1406};
1407
1408static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1409	&block_grc_defs,
1410	&block_miscs_defs,
1411	&block_misc_defs,
1412	&block_dbu_defs,
1413	&block_pglue_b_defs,
1414	&block_cnig_defs,
1415	&block_cpmu_defs,
1416	&block_ncsi_defs,
1417	&block_opte_defs,
1418	&block_bmb_defs,
1419	&block_pcie_defs,
1420	&block_mcp_defs,
1421	&block_mcp2_defs,
1422	&block_pswhst_defs,
1423	&block_pswhst2_defs,
1424	&block_pswrd_defs,
1425	&block_pswrd2_defs,
1426	&block_pswwr_defs,
1427	&block_pswwr2_defs,
1428	&block_pswrq_defs,
1429	&block_pswrq2_defs,
1430	&block_pglcs_defs,
1431	&block_dmae_defs,
1432	&block_ptu_defs,
1433	&block_tcm_defs,
1434	&block_mcm_defs,
1435	&block_ucm_defs,
1436	&block_xcm_defs,
1437	&block_ycm_defs,
1438	&block_pcm_defs,
1439	&block_qm_defs,
1440	&block_tm_defs,
1441	&block_dorq_defs,
1442	&block_brb_defs,
1443	&block_src_defs,
1444	&block_prs_defs,
1445	&block_tsdm_defs,
1446	&block_msdm_defs,
1447	&block_usdm_defs,
1448	&block_xsdm_defs,
1449	&block_ysdm_defs,
1450	&block_psdm_defs,
1451	&block_tsem_defs,
1452	&block_msem_defs,
1453	&block_usem_defs,
1454	&block_xsem_defs,
1455	&block_ysem_defs,
1456	&block_psem_defs,
1457	&block_rss_defs,
1458	&block_tmld_defs,
1459	&block_muld_defs,
1460	&block_yuld_defs,
1461	&block_xyld_defs,
1462	&block_ptld_defs,
1463	&block_ypld_defs,
1464	&block_prm_defs,
1465	&block_pbf_pb1_defs,
1466	&block_pbf_pb2_defs,
1467	&block_rpb_defs,
1468	&block_btb_defs,
1469	&block_pbf_defs,
1470	&block_rdif_defs,
1471	&block_tdif_defs,
1472	&block_cdu_defs,
1473	&block_ccfc_defs,
1474	&block_tcfc_defs,
1475	&block_igu_defs,
1476	&block_cau_defs,
1477	&block_rgfs_defs,
1478	&block_rgsrc_defs,
1479	&block_tgfs_defs,
1480	&block_tgsrc_defs,
1481	&block_umac_defs,
1482	&block_xmac_defs,
1483	&block_dbg_defs,
1484	&block_nig_defs,
1485	&block_wol_defs,
1486	&block_bmbn_defs,
1487	&block_ipc_defs,
1488	&block_nwm_defs,
1489	&block_nws_defs,
1490	&block_ms_defs,
1491	&block_phy_pcie_defs,
1492	&block_led_defs,
1493	&block_avs_wrap_defs,
1494	&block_pxpreqbus_defs,
1495	&block_misc_aeu_defs,
1496	&block_bar0_map_defs,
1497};
1498
1499static struct platform_defs s_platform_defs[] = {
1500	{"asic", 1, 256, 32768},
1501	{"reserved", 0, 0, 0},
1502	{"reserved2", 0, 0, 0},
1503	{"reserved3", 0, 0, 0}
 
1504};
1505
1506static struct grc_param_defs s_grc_param_defs[] = {
1507	/* DBG_GRC_PARAM_DUMP_TSTORM */
1508	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1509
1510	/* DBG_GRC_PARAM_DUMP_MSTORM */
1511	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1512
1513	/* DBG_GRC_PARAM_DUMP_USTORM */
1514	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1515
1516	/* DBG_GRC_PARAM_DUMP_XSTORM */
1517	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1518
1519	/* DBG_GRC_PARAM_DUMP_YSTORM */
1520	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1521
1522	/* DBG_GRC_PARAM_DUMP_PSTORM */
1523	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1524
1525	/* DBG_GRC_PARAM_DUMP_REGS */
1526	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1527
1528	/* DBG_GRC_PARAM_DUMP_RAM */
1529	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1530
1531	/* DBG_GRC_PARAM_DUMP_PBUF */
1532	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1533
1534	/* DBG_GRC_PARAM_DUMP_IOR */
1535	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1536
1537	/* DBG_GRC_PARAM_DUMP_VFC */
1538	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1539
1540	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1541	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1542
1543	/* DBG_GRC_PARAM_DUMP_ILT */
1544	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1545
1546	/* DBG_GRC_PARAM_DUMP_RSS */
1547	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1548
1549	/* DBG_GRC_PARAM_DUMP_CAU */
1550	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1551
1552	/* DBG_GRC_PARAM_DUMP_QM */
1553	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1554
1555	/* DBG_GRC_PARAM_DUMP_MCP */
1556	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1557
1558	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1559	{{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1560
1561	/* DBG_GRC_PARAM_DUMP_CFC */
1562	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1563
1564	/* DBG_GRC_PARAM_DUMP_IGU */
1565	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1566
1567	/* DBG_GRC_PARAM_DUMP_BRB */
1568	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1569
1570	/* DBG_GRC_PARAM_DUMP_BTB */
1571	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1572
1573	/* DBG_GRC_PARAM_DUMP_BMB */
1574	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1575
1576	/* DBG_GRC_PARAM_DUMP_NIG */
1577	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1578
1579	/* DBG_GRC_PARAM_DUMP_MULD */
1580	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1581
1582	/* DBG_GRC_PARAM_DUMP_PRS */
1583	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1584
1585	/* DBG_GRC_PARAM_DUMP_DMAE */
1586	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1587
1588	/* DBG_GRC_PARAM_DUMP_TM */
1589	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1590
1591	/* DBG_GRC_PARAM_DUMP_SDM */
1592	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1593
1594	/* DBG_GRC_PARAM_DUMP_DIF */
1595	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1596
1597	/* DBG_GRC_PARAM_DUMP_STATIC */
1598	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1599
1600	/* DBG_GRC_PARAM_UNSTALL */
1601	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1602
1603	/* DBG_GRC_PARAM_NUM_LCIDS */
1604	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1605	 MAX_LCIDS, MAX_LCIDS},
1606
1607	/* DBG_GRC_PARAM_NUM_LTIDS */
1608	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1609	 MAX_LTIDS, MAX_LTIDS},
1610
1611	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1612	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1613
1614	/* DBG_GRC_PARAM_CRASH */
1615	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1616
1617	/* DBG_GRC_PARAM_PARITY_SAFE */
1618	{{0, 0, 0}, 0, 1, false, false, 1, 0},
1619
1620	/* DBG_GRC_PARAM_DUMP_CM */
1621	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1622
1623	/* DBG_GRC_PARAM_DUMP_PHY */
1624	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1625
1626	/* DBG_GRC_PARAM_NO_MCP */
1627	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1628
1629	/* DBG_GRC_PARAM_NO_FW_VER */
1630	{{0, 0, 0}, 0, 1, false, false, 0, 0}
1631};
1632
1633static struct rss_mem_defs s_rss_mem_defs[] = {
1634	{ "rss_mem_cid", "rss_cid", 0, 32,
1635	  {256, 320, 512} },
1636
1637	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1638	  {128, 208, 257} },
1639
1640	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1641	  {128, 208, 257} },
1642
1643	{ "rss_mem_info", "rss_info", 3072, 16,
1644	  {128, 208, 256} },
1645
1646	{ "rss_mem_ind", "rss_ind", 4096, 16,
1647	  {16384, 26624, 32768} }
1648};
1649
1650static struct vfc_ram_defs s_vfc_ram_defs[] = {
1651	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1652	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1653	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1654	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1655};
1656
1657static struct big_ram_defs s_big_ram_defs[] = {
1658	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1659	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1660	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1661	  {153600, 180224, 282624} },
1662
1663	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1664	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1665	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1666	  {92160, 117760, 168960} },
1667
1668	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1669	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1670	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1671	  {36864, 36864, 36864} }
1672};
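/* Sketch of the indirect access these definitions assume (illustrative, not
 * the complete dump flow): a big RAM is read by writing a row index to its
 * address register and then reading the data register repeatedly, e.g.
 *
 *	qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, row);
 *	ARR_REG_RD(p_hwfn, p_ptt, big_ram->data_reg_addr, buf, row_dwords);
 *
 * with ram_size[] giving the per-chip capacity in dwords.
 */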
1673
1674static struct reset_reg_defs s_reset_regs_defs[] = {
1675	/* DBG_RESET_REG_MISCS_PL_UA */
1676	{ MISCS_REG_RESET_PL_UA,
1677	  {true, true, true}, {0x0, 0x0, 0x0} },
1678
1679	/* DBG_RESET_REG_MISCS_PL_HV */
1680	{ MISCS_REG_RESET_PL_HV,
1681	  {true, true, true}, {0x0, 0x400, 0x600} },
1682
1683	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1684	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1685	  {false, true, true}, {0x0, 0x0, 0x0} },
1686
1687	/* DBG_RESET_REG_MISC_PL_UA */
1688	{ MISC_REG_RESET_PL_UA,
1689	  {true, true, true}, {0x0, 0x0, 0x0} },
1690
1691	/* DBG_RESET_REG_MISC_PL_HV */
1692	{ MISC_REG_RESET_PL_HV,
1693	  {true, true, true}, {0x0, 0x0, 0x0} },
1694
1695	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1696	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1697	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1698
1699	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1700	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1701	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1702
1703	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1704	{ MISC_REG_RESET_PL_PDA_VAUX,
1705	  {true, true, true}, {0x2, 0x2, 0x2} },
1706};
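/* Illustrative note: a block marked 'unreset' in its block_defs is taken out
 * of reset before dumping by writing its unreset value to the matching reset
 * register at addr + RESET_REG_UNRESET_OFFSET (sketch, assuming the driver's
 * unreset flow):
 *
 *	qed_wr(p_hwfn, p_ptt,
 *	       s_reset_regs_defs[reset_reg_id].addr + RESET_REG_UNRESET_OFFSET,
 *	       reg_val);
 */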
1707
1708static struct phy_defs s_phy_defs[] = {
1709	{"nw_phy", NWS_REG_NWS_CMU_K2,
1710	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1711	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1712	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1713	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1714	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1715	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1716	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1717	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1718	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1719	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1720	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1721	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1722	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1723	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1724	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1725	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1726	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1727	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1728	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1729};
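/* Illustrative TBUS access implied by these definitions (sketch, hypothetical
 * variable names): a PHY record is read by writing the low/high halves of the
 * TBUS address and then reading the data registers, all relative to base_addr:
 *
 *	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_lo_addr, lo);
 *	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_hi_addr, hi);
 *	data = qed_rd(p_hwfn, p_ptt, phy->base_addr + phy->tbus_data_lo_addr);
 */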
1730
1731static struct split_type_defs s_split_type_defs[] = {
1732	/* SPLIT_TYPE_NONE */
1733	{"eng"},
1734
1735	/* SPLIT_TYPE_PORT */
1736	{"port"},
1737
1738	/* SPLIT_TYPE_PF */
1739	{"pf"},
1740
1741	/* SPLIT_TYPE_PORT_PF */
1742	{"port"},
1743
1744	/* SPLIT_TYPE_VF */
1745	{"vf"}
1746};
1747
1748/**************************** Private Functions ******************************/
1749
1750/* Reads and returns a single dword from the specified unaligned buffer */
1751static u32 qed_read_unaligned_dword(u8 *buf)
1752{
1753	u32 dword;
1754
1755	memcpy((u8 *)&dword, buf, sizeof(dword));
1756	return dword;
1757}
1758
1759/* Sets the value of the specified GRC param */
1760static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
1761			      enum dbg_grc_params grc_param, u32 val)
1762{
1763	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1764
1765	dev_data->grc.param_val[grc_param] = val;
1766}
1767
1768/* Returns the value of the specified GRC param */
1769static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1770			     enum dbg_grc_params grc_param)
1771{
1772	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1773
1774	return dev_data->grc.param_val[grc_param];
1775}
1776
1777/* Initializes the GRC parameters */
1778static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1779{
1780	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1781
1782	if (!dev_data->grc.params_initialized) {
1783		qed_dbg_grc_set_params_default(p_hwfn);
1784		dev_data->grc.params_initialized = 1;
1785	}
1786}
1787
1788/* Initializes debug data for the specified device */
1789static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1790					struct qed_ptt *p_ptt)
1791{
1792	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1793	u8 num_pfs = 0, max_pfs_per_port = 0;
1794
1795	if (dev_data->initialized)
1796		return DBG_STATUS_OK;
1797
1798	/* Set chip */
1799	if (QED_IS_K2(p_hwfn->cdev)) {
1800		dev_data->chip_id = CHIP_K2;
1801		dev_data->mode_enable[MODE_K2] = 1;
1802		dev_data->num_vfs = MAX_NUM_VFS_K2;
1803		num_pfs = MAX_NUM_PFS_K2;
1804		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
1805	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1806		dev_data->chip_id = CHIP_BB;
1807		dev_data->mode_enable[MODE_BB] = 1;
1808		dev_data->num_vfs = MAX_NUM_VFS_BB;
1809		num_pfs = MAX_NUM_PFS_BB;
1810		max_pfs_per_port = MAX_NUM_PFS_BB;
1811	} else {
1812		return DBG_STATUS_UNKNOWN_CHIP;
1813	}
1814
1815	/* Set platform */
1816	dev_data->platform_id = PLATFORM_ASIC;
1817	dev_data->mode_enable[MODE_ASIC] = 1;
1818
1819	/* Set port mode */
1820	switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
1821	case 0:
1822		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
1823		break;
1824	case 1:
1825		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
1826		break;
1827	case 2:
1828		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
1829		break;
1830	}
1831
1832	/* Set 100G mode */
1833	if (dev_data->chip_id == CHIP_BB &&
1834	    qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
1835		dev_data->mode_enable[MODE_100G] = 1;
1836
1837	/* Set number of ports */
1838	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1839	    dev_data->mode_enable[MODE_100G])
1840		dev_data->num_ports = 1;
1841	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1842		dev_data->num_ports = 2;
1843	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1844		dev_data->num_ports = 4;
1845
1846	/* Set number of PFs per port */
1847	dev_data->num_pfs_per_port = min_t(u32,
1848					   num_pfs / dev_data->num_ports,
1849					   max_pfs_per_port);
1850
1851	/* Initialize the GRC parameters */
1852	qed_dbg_grc_init_params(p_hwfn);
1853
1854	dev_data->use_dmae = true;
1855	dev_data->initialized = 1;
1856
1857	return DBG_STATUS_OK;
1858}
1859
1860static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1861						    enum block_id block_id)
1862{
1863	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1864
1865	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1866						       MAX_CHIP_IDS +
1867						       dev_data->chip_id];
1868}
1869
1870/* Reads the FW info structure for the specified Storm from the chip,
1871 * and writes it to the specified fw_info pointer.
1872 */
1873static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1874				   struct qed_ptt *p_ptt,
1875				   u8 storm_id, struct fw_info *fw_info)
1876{
1877	struct storm_defs *storm = &s_storm_defs[storm_id];
1878	struct fw_info_location fw_info_location;
1879	u32 addr, i, *dest;
1880
1881	memset(&fw_info_location, 0, sizeof(fw_info_location));
1882	memset(fw_info, 0, sizeof(*fw_info));
1883
1884	/* First, read the address that points to the fw_info location.
1885	 * The address is located in the last line of the Storm RAM.
1886	 */
1887	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1888	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1889	       sizeof(fw_info_location);
1890	dest = (u32 *)&fw_info_location;
1891
1892	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1893	     i++, addr += BYTES_IN_DWORD)
1894		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1895
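	/* fw_info_location, read above from the end of the Storm RAM, holds
	 * the GRC address and byte size of the fw_info structure; the size is
	 * sanity-checked against sizeof(*fw_info) before the second read.
	 */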
1896	/* Read FW version info from Storm RAM */
1897	if (fw_info_location.size > 0 && fw_info_location.size <=
1898	    sizeof(*fw_info)) {
1899		addr = fw_info_location.grc_addr;
1900		dest = (u32 *)fw_info;
1901		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1902		     i++, addr += BYTES_IN_DWORD)
1903			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1904	}
1905}
1906
1907/* Dumps the specified string to the specified buffer.
1908 * Returns the dumped size in bytes.
1909 */
1910static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1911{
1912	if (dump)
1913		strcpy(dump_buf, str);
1914
1915	return (u32)strlen(str) + 1;
1916}
1917
1918/* Dumps zeros to align the specified buffer to dwords.
1919 * Returns the dumped size in bytes.
1920 */
1921static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1922{
1923	u8 offset_in_dword, align_size;
1924
1925	offset_in_dword = (u8)(byte_offset & 0x3);
1926	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1927
1928	if (dump && align_size)
1929		memset(dump_buf, 0, align_size);
1930
1931	return align_size;
1932}
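/* Illustrative example: if the current byte offset is 6, offset_in_dword is 2,
 * so qed_dump_align() emits two zero bytes and the next param starts
 * dword-aligned at byte offset 8.
 */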
1933
1934/* Writes the specified string param to the specified buffer.
1935 * Returns the dumped size in dwords.
1936 */
1937static u32 qed_dump_str_param(u32 *dump_buf,
1938			      bool dump,
1939			      const char *param_name, const char *param_val)
1940{
1941	char *char_buf = (char *)dump_buf;
1942	u32 offset = 0;
1943
1944	/* Dump param name */
1945	offset += qed_dump_str(char_buf + offset, dump, param_name);
1946
1947	/* Indicate a string param value */
1948	if (dump)
1949		*(char_buf + offset) = 1;
1950	offset++;
1951
1952	/* Dump param value */
1953	offset += qed_dump_str(char_buf + offset, dump, param_val);
1954
1955	/* Align buffer to next dword */
1956	offset += qed_dump_align(char_buf + offset, dump, offset);
1957
1958	return BYTES_TO_DWORDS(offset);
1959}
1960
1961/* Writes the specified numeric param to the specified buffer.
1962 * Returns the dumped size in dwords.
1963 */
1964static u32 qed_dump_num_param(u32 *dump_buf,
1965			      bool dump, const char *param_name, u32 param_val)
1966{
1967	char *char_buf = (char *)dump_buf;
1968	u32 offset = 0;
1969
1970	/* Dump param name */
1971	offset += qed_dump_str(char_buf + offset, dump, param_name);
1972
1973	/* Indicate a numeric param value */
1974	if (dump)
1975		*(char_buf + offset) = 0;
1976	offset++;
1977
1978	/* Align buffer to next dword */
1979	offset += qed_dump_align(char_buf + offset, dump, offset);
1980
1981	/* Dump param value (and change offset from bytes to dwords) */
1982	offset = BYTES_TO_DWORDS(offset);
1983	if (dump)
1984		*(dump_buf + offset) = param_val;
1985	offset++;
1986
1987	return offset;
1988}
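/* Illustrative layout note: qed_dump_str_param(buf, true, "eng", "asic") emits
 * the bytes "eng\0", a type byte of 1, "asic\0" and zero padding up to the
 * next dword boundary, while qed_dump_num_param(buf, true, "count", 3) emits
 * "count\0", a type byte of 0, zero padding to the next dword and then the
 * value dword 0x00000003. Both return the number of dwords written; the
 * parameter names above are examples only.
 */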
1989
1990/* Reads the FW version and writes it as a param to the specified buffer.
1991 * Returns the dumped size in dwords.
1992 */
1993static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1994				 struct qed_ptt *p_ptt,
1995				 u32 *dump_buf, bool dump)
1996{
1997	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1998	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1999	struct fw_info fw_info = { {0}, {0} };
2000	u32 offset = 0;
2001
2002	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2003		/* Read FW info from chip */
2004		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
2005
2006		/* Create FW version/image strings */
2007		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
2008			     "%d_%d_%d_%d", fw_info.ver.num.major,
2009			     fw_info.ver.num.minor, fw_info.ver.num.rev,
2010			     fw_info.ver.num.eng) < 0)
2011			DP_NOTICE(p_hwfn,
2012				  "Unexpected debug error: invalid FW version string\n");
2013		switch (fw_info.ver.image_id) {
2014		case FW_IMG_MAIN:
2015			strcpy(fw_img_str, "main");
2016			break;
2017		default:
2018			strcpy(fw_img_str, "unknown");
2019			break;
2020		}
2021	}
2022
2023	/* Dump FW version, image and timestamp */
2024	offset += qed_dump_str_param(dump_buf + offset,
2025				     dump, "fw-version", fw_ver_str);
2026	offset += qed_dump_str_param(dump_buf + offset,
2027				     dump, "fw-image", fw_img_str);
2028	offset += qed_dump_num_param(dump_buf + offset,
2029				     dump,
2030				     "fw-timestamp", fw_info.ver.timestamp);
2031
2032	return offset;
2033}
2034
2035/* Reads the MFW version and writes it as a param to the specified buffer.
2036 * Returns the dumped size in dwords.
2037 */
2038static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2039				  struct qed_ptt *p_ptt,
2040				  u32 *dump_buf, bool dump)
2041{
2042	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2043
2044	if (dump &&
2045	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2046		u32 global_section_offsize, global_section_addr, mfw_ver;
2047		u32 public_data_addr, global_section_offsize_addr;
2048
2049		/* Find MCP public data GRC address. Needs to be ORed with
2050		 * MCP_REG_SCRATCH due to a HW bug.
2051		 */
2052		public_data_addr = qed_rd(p_hwfn,
2053					  p_ptt,
2054					  MISC_REG_SHARED_MEM_ADDR) |
2055				   MCP_REG_SCRATCH;
2056
2057		/* Find MCP public global section offset */
2058		global_section_offsize_addr = public_data_addr +
2059					      offsetof(struct mcp_public_data,
2060						       sections) +
2061					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2062		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2063						global_section_offsize_addr);
2064		global_section_addr =
2065			MCP_REG_SCRATCH +
2066			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2067
2068		/* Read MFW version from MCP public global section */
2069		mfw_ver = qed_rd(p_hwfn, p_ptt,
2070				 global_section_addr +
2071				 offsetof(struct public_global, mfw_ver));
2072
2073		/* Dump MFW version param */
2074		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2075			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2076			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2077			DP_NOTICE(p_hwfn,
2078				  "Unexpected debug error: invalid MFW version string\n");
2079	}
2080
2081	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2082}
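/* Illustrative example: the 32-bit mfw_ver word packs the version bytes most
 * significant first, so a (made-up) value of 0x08251000 would be rendered as
 * the string "8_37_16_0".
 */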
2083
2084/* Writes a section header to the specified buffer.
2085 * Returns the dumped size in dwords.
2086 */
2087static u32 qed_dump_section_hdr(u32 *dump_buf,
2088				bool dump, const char *name, u32 num_params)
2089{
2090	return qed_dump_num_param(dump_buf, dump, name, num_params);
2091}
2092
2093/* Writes the common global params to the specified buffer.
2094 * Returns the dumped size in dwords.
2095 */
2096static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2097					 struct qed_ptt *p_ptt,
2098					 u32 *dump_buf,
2099					 bool dump,
2100					 u8 num_specific_global_params)
2101{
2102	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2103	u32 offset = 0;
2104	u8 num_params;
2105
2106	/* Dump global params section header */
2107	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2108	offset += qed_dump_section_hdr(dump_buf + offset,
2109				       dump, "global_params", num_params);
2110
2111	/* Store params */
2112	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2113	offset += qed_dump_mfw_ver_param(p_hwfn,
2114					 p_ptt, dump_buf + offset, dump);
2115	offset += qed_dump_num_param(dump_buf + offset,
2116				     dump, "tools-version", TOOLS_VERSION);
2117	offset += qed_dump_str_param(dump_buf + offset,
2118				     dump,
2119				     "chip",
2120				     s_chip_defs[dev_data->chip_id].name);
2121	offset += qed_dump_str_param(dump_buf + offset,
2122				     dump,
2123				     "platform",
2124				     s_platform_defs[dev_data->platform_id].
2125				     name);
2126	offset +=
2127	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2128			       p_hwfn->abs_pf_id);
2129
2130	return offset;
2131}
2132
2133/* Writes the "last" section (including CRC) to the specified buffer at the
2134 * given offset. Returns the dumped size in dwords.
2135 */
2136static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2137{
2138	u32 start_offset = offset;
2139
2140	/* Dump CRC section header */
2141	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2142
2143	/* Calculate CRC32 and add it to the dword after the "last" section */
2144	if (dump)
2145		*(dump_buf + offset) = ~crc32(0xffffffff,
2146					      (u8 *)dump_buf,
2147					      DWORDS_TO_BYTES(offset));
2148
2149	offset++;
2150
2151	return offset - start_offset;
2152}
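/* Note: a dump consumer can validate the buffer by recomputing
 * ~crc32(0xffffffff, buf, size_in_bytes) over everything that precedes this
 * trailing CRC dword and comparing the result with the stored value.
 */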
2153
2154	/* Update the blocks' reset state */
2155static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2156					  struct qed_ptt *p_ptt)
2157{
2158	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2159	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2160	u32 i;
2161
2162	/* Read reset registers */
2163	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2164		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2165			reg_val[i] = qed_rd(p_hwfn,
2166					    p_ptt, s_reset_regs_defs[i].addr);
2167
2168	/* Check if blocks are in reset */
2169	for (i = 0; i < MAX_BLOCK_ID; i++) {
2170		struct block_defs *block = s_block_defs[i];
2171
2172		dev_data->block_in_reset[i] = block->has_reset_bit &&
2173		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2174	}
2175}
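/* Note: in the reset registers read above, a reset bit value of 0 means the
 * block is held in reset, hence the negation when filling block_in_reset[].
 */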
2176
2177/* Enable / disable the Debug block */
2178static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2179				     struct qed_ptt *p_ptt, bool enable)
2180{
2181	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2182}
2183
2184/* Resets the Debug block */
2185static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2186				    struct qed_ptt *p_ptt)
2187{
2188	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2189	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2190
2191	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2192	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2193	new_reset_reg_val =
2194	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2195
2196	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2197	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2198}
2199
2200static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2201				     struct qed_ptt *p_ptt,
2202				     enum dbg_bus_frame_modes mode)
2203{
2204	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2205}
2206
2207/* Enable / disable Debug Bus clients according to the specified mask
2208 * (1 = enable, 0 = disable).
2209 */
2210static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2211				   struct qed_ptt *p_ptt, u32 client_mask)
2212{
2213	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2214}
2215
2216static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2217{
2218	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2219	bool arg1, arg2;
2220	const u32 *ptr;
2221	u8 tree_val;
2222
2223	/* Get next element from modes tree buffer */
2224	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2225	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2226
2227	switch (tree_val) {
2228	case INIT_MODE_OP_NOT:
2229		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2230	case INIT_MODE_OP_OR:
2231	case INIT_MODE_OP_AND:
2232		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2233		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2234		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2235							arg2) : (arg1 && arg2);
2236	default:
2237		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2238	}
2239}
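/* Illustrative example: the modes tree is evaluated recursively in prefix
 * notation, so the byte sequence {INIT_MODE_OP_AND, <MODE_K2 entry>,
 * <MODE_ASIC entry>} matches only when both modes are set in
 * dev_data->mode_enable[]. Plain mode entries are stored as the mode value
 * plus MAX_INIT_MODE_OPS, which is why the offset is subtracted above.
 */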
2240
2241/* Returns true if the specified entity (indicated by GRC param) should be
2242 * included in the dump, false otherwise.
2243 */
2244static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2245				enum dbg_grc_params grc_param)
2246{
2247	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2248}
2249
2250	/* Returns true if the specified Storm should be included in the dump, false
2251 * otherwise.
2252 */
2253static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2254				      enum dbg_storms storm)
2255{
2256	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2257}
2258
2259/* Returns true if the specified memory should be included in the dump, false
2260 * otherwise.
2261 */
2262static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2263				    enum block_id block_id, u8 mem_group_id)
2264{
2265	struct block_defs *block = s_block_defs[block_id];
2266	u8 i;
2267
2268	/* Check Storm match */
2269	if (block->associated_to_storm &&
2270	    !qed_grc_is_storm_included(p_hwfn,
2271				       (enum dbg_storms)block->storm_id))
2272		return false;
2273
2274	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2275		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2276
2277		if (mem_group_id == big_ram->mem_group_id ||
2278		    mem_group_id == big_ram->ram_mem_group_id)
2279			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2280	}
2281
2282	switch (mem_group_id) {
2283	case MEM_GROUP_PXP_ILT:
2284	case MEM_GROUP_PXP_MEM:
2285		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2286	case MEM_GROUP_RAM:
2287		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2288	case MEM_GROUP_PBUF:
2289		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2290	case MEM_GROUP_CAU_MEM:
2291	case MEM_GROUP_CAU_SB:
2292	case MEM_GROUP_CAU_PI:
2293		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2294	case MEM_GROUP_QM_MEM:
2295		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2296	case MEM_GROUP_CFC_MEM:
2297	case MEM_GROUP_CONN_CFC_MEM:
2298	case MEM_GROUP_TASK_CFC_MEM:
2299		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2300		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2301	case MEM_GROUP_IGU_MEM:
2302	case MEM_GROUP_IGU_MSIX:
2303		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2304	case MEM_GROUP_MULD_MEM:
2305		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2306	case MEM_GROUP_PRS_MEM:
2307		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2308	case MEM_GROUP_DMAE_MEM:
2309		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2310	case MEM_GROUP_TM_MEM:
2311		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2312	case MEM_GROUP_SDM_MEM:
2313		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2314	case MEM_GROUP_TDIF_CTX:
2315	case MEM_GROUP_RDIF_CTX:
2316		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2317	case MEM_GROUP_CM_MEM:
2318		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2319	case MEM_GROUP_IOR:
2320		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2321	default:
2322		return true;
2323	}
2324}
2325
2326	/* Stalls or unstalls all Storms */
2327static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2328				 struct qed_ptt *p_ptt, bool stall)
2329{
2330	u32 reg_addr;
2331	u8 storm_id;
2332
2333	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2334		if (!qed_grc_is_storm_included(p_hwfn,
2335					       (enum dbg_storms)storm_id))
2336			continue;
2337
2338		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2339		    SEM_FAST_REG_STALL_0_BB_K2;
2340		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2341	}
2342
2343	msleep(STALL_DELAY_MS);
2344}
2345
2346/* Takes all blocks out of reset */
2347static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2348				   struct qed_ptt *p_ptt)
2349{
2350	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2351	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2352	u32 block_id, i;
2353
2354	/* Fill reset regs values */
2355	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2356		struct block_defs *block = s_block_defs[block_id];
2357
2358		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2359		    block->unreset)
2360			reg_val[block->reset_reg] |=
2361			    BIT(block->reset_bit_offset);
2362	}
2363
2364	/* Write reset registers */
2365	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2366		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2367			continue;
2368
2369		reg_val[i] |=
2370			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2371
2372		if (reg_val[i])
2373			qed_wr(p_hwfn,
2374			       p_ptt,
2375			       s_reset_regs_defs[i].addr +
2376			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2377	}
2378}
2379
2380/* Returns the attention block data of the specified block */
2381static const struct dbg_attn_block_type_data *
2382qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2383{
2384	const struct dbg_attn_block *base_attn_block_arr =
2385		(const struct dbg_attn_block *)
2386		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2387
2388	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2389}
2390
2391/* Returns the attention registers of the specified block */
2392static const struct dbg_attn_reg *
2393qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2394			u8 *num_attn_regs)
2395{
2396	const struct dbg_attn_block_type_data *block_type_data =
2397		qed_get_block_attn_data(block_id, attn_type);
2398
2399	*num_attn_regs = block_type_data->num_regs;
2400
2401	return &((const struct dbg_attn_reg *)
2402		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2403							  regs_offset];
2404}
2405
2406/* For each block, clear the status of all parities */
2407static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2408				   struct qed_ptt *p_ptt)
2409{
2410	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2411	const struct dbg_attn_reg *attn_reg_arr;
2412	u8 reg_idx, num_attn_regs;
2413	u32 block_id;
2414
2415	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2416		if (dev_data->block_in_reset[block_id])
2417			continue;
2418
2419		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2420						       ATTN_TYPE_PARITY,
2421						       &num_attn_regs);
2422
2423		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2424			const struct dbg_attn_reg *reg_data =
2425				&attn_reg_arr[reg_idx];
2426			u16 modes_buf_offset;
2427			bool eval_mode;
2428
2429			/* Check mode */
2430			eval_mode = GET_FIELD(reg_data->mode.data,
2431					      DBG_MODE_HDR_EVAL_MODE) > 0;
2432			modes_buf_offset =
2433				GET_FIELD(reg_data->mode.data,
2434					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2435
2436			/* If Mode match: clear parity status */
2437			if (!eval_mode ||
2438			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2439				qed_rd(p_hwfn, p_ptt,
2440				       DWORDS_TO_BYTES(reg_data->
2441						       sts_clr_address));
2442		}
2443	}
2444}
2445
2446/* Dumps GRC registers section header. Returns the dumped size in dwords.
2447 * The following parameters are dumped:
2448 * - count: no. of dumped entries
2449 * - split_type: split type
2450 * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
2451 * - param_name: user parameter value (dumped only if param_name != NULL
2452 *		 and param_val != NULL).
2453 */
2454static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2455				 bool dump,
2456				 u32 num_reg_entries,
2457				 enum init_split_types split_type,
2458				 u8 split_id,
2459				 const char *param_name, const char *param_val)
2460{
2461	u8 num_params = 2 +
2462	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
2463	u32 offset = 0;
2464
2465	offset += qed_dump_section_hdr(dump_buf + offset,
2466				       dump, "grc_regs", num_params);
2467	offset += qed_dump_num_param(dump_buf + offset,
2468				     dump, "count", num_reg_entries);
2469	offset += qed_dump_str_param(dump_buf + offset,
2470				     dump, "split",
2471				     s_split_type_defs[split_type].name);
2472	if (split_type != SPLIT_TYPE_NONE)
2473		offset += qed_dump_num_param(dump_buf + offset,
2474					     dump, "id", split_id);
2475	if (param_name && param_val)
2476		offset += qed_dump_str_param(dump_buf + offset,
2477					     dump, param_name, param_val);
2478
2479	return offset;
2480}
2481
2482/* Reads the specified registers into the specified buffer.
2483 * The addr and len arguments are specified in dwords.
2484 */
2485void qed_read_regs(struct qed_hwfn *p_hwfn,
2486		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2487{
2488	u32 i;
2489
2490	for (i = 0; i < len; i++)
2491		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2492}
2493
2494/* Dumps the GRC registers in the specified address range.
2495 * Returns the dumped size in dwords.
2496 * The addr and len arguments are specified in dwords.
2497 */
2498static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2499				   struct qed_ptt *p_ptt,
2500				   u32 *dump_buf,
2501				   bool dump, u32 addr, u32 len, bool wide_bus,
2502				   enum init_split_types split_type,
2503				   u8 split_id)
2504{
2505	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2506	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
2507
2508	if (!dump)
2509		return len;
2510
2511	/* Print log if needed */
2512	dev_data->num_regs_read += len;
2513	if (dev_data->num_regs_read >=
2514	    s_platform_defs[dev_data->platform_id].log_thresh) {
2515		DP_VERBOSE(p_hwfn,
2516			   QED_MSG_DEBUG,
2517			   "Dumping %d registers...\n",
2518			   dev_data->num_regs_read);
2519		dev_data->num_regs_read = 0;
2520	}
2521
2522	switch (split_type) {
2523	case SPLIT_TYPE_PORT:
2524		port_id = split_id;
2525		break;
2526	case SPLIT_TYPE_PF:
2527		pf_id = split_id;
2528		break;
2529	case SPLIT_TYPE_PORT_PF:
2530		port_id = split_id / dev_data->num_pfs_per_port;
2531		pf_id = port_id + dev_data->num_ports *
2532		    (split_id % dev_data->num_pfs_per_port);
2533		break;
2534	case SPLIT_TYPE_VF:
2535		vf_id = split_id;
2536		break;
2537	default:
2538		break;
2539	}
2540
2541	/* Try reading using DMAE */
2542	if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
2543	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2544	     wide_bus)) {
2545		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2546				       (u64)(uintptr_t)(dump_buf), len, NULL))
2547			return len;
2548		dev_data->use_dmae = 0;
2549		DP_VERBOSE(p_hwfn,
2550			   QED_MSG_DEBUG,
2551			   "Failed reading from chip using DMAE, using GRC instead\n");
2552	}
2553
2554	/* If not read using DMAE, read using GRC */
2555
2556	/* Set pretend */
2557	if (split_type != dev_data->pretend.split_type || split_id !=
2558	    dev_data->pretend.split_id) {
2559		switch (split_type) {
2560		case SPLIT_TYPE_PORT:
2561			qed_port_pretend(p_hwfn, p_ptt, port_id);
2562			break;
2563		case SPLIT_TYPE_PF:
2564			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2565			qed_fid_pretend(p_hwfn, p_ptt, fid);
2566			break;
2567		case SPLIT_TYPE_PORT_PF:
2568			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2569			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2570			break;
2571		case SPLIT_TYPE_VF:
2572			fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
2573			      (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
2574			qed_fid_pretend(p_hwfn, p_ptt, fid);
2575			break;
2576		default:
2577			break;
2578		}
2579
2580		dev_data->pretend.split_type = (u8)split_type;
2581		dev_data->pretend.split_id = split_id;
2582	}
2583
2584	/* Read registers using GRC */
2585	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2586
2587	return len;
2588}
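/* Note on the GRC path above: the last applied pretend configuration is
 * cached in dev_data->pretend, so consecutive reads that use the same split
 * type and split ID skip the pretend register writes. The pretend is
 * cancelled once, at the end of qed_grc_dump_registers().
 */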
2589
2590/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2591 * The addr and len arguments are specified in dwords.
2592 */
2593static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2594				      bool dump, u32 addr, u32 len)
2595{
2596	if (dump)
2597		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2598
2599	return 1;
2600}
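/* Illustrative example: the sequence header is a single dword packing the GRC
 * start address (in dwords) in its low bits and the length (in dwords)
 * shifted by REG_DUMP_LEN_SHIFT, so dumping 4 dwords from dword address
 * 0x1000 produces the header 0x1000 | (4 << REG_DUMP_LEN_SHIFT).
 */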
2601
2602/* Dumps GRC registers sequence. Returns the dumped size in dwords.
2603 * The addr and len arguments are specified in dwords.
2604 */
2605static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2606				  struct qed_ptt *p_ptt,
2607				  u32 *dump_buf,
2608				  bool dump, u32 addr, u32 len, bool wide_bus,
2609				  enum init_split_types split_type, u8 split_id)
2610{
2611	u32 offset = 0;
2612
2613	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2614	offset += qed_grc_dump_addr_range(p_hwfn,
2615					  p_ptt,
2616					  dump_buf + offset,
2617					  dump, addr, len, wide_bus,
2618					  split_type, split_id);
2619
2620	return offset;
2621}
2622
2623/* Dumps GRC registers sequence with skip cycle.
2624 * Returns the dumped size in dwords.
2625 * - addr:	start GRC address in dwords
2626 * - total_len:	total no. of dwords to dump
2627 * - read_len:	no. consecutive dwords to read
2628 * - skip_len:	no. of dwords to skip (and fill with zeros)
2629 */
2630static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2631				       struct qed_ptt *p_ptt,
2632				       u32 *dump_buf,
2633				       bool dump,
2634				       u32 addr,
2635				       u32 total_len,
2636				       u32 read_len, u32 skip_len)
2637{
2638	u32 offset = 0, reg_offset = 0;
2639
2640	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2641
2642	if (!dump)
2643		return offset + total_len;
2644
2645	while (reg_offset < total_len) {
2646		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2647
2648		offset += qed_grc_dump_addr_range(p_hwfn,
2649						  p_ptt,
2650						  dump_buf + offset,
2651						  dump,  addr, curr_len, false,
2652						  SPLIT_TYPE_NONE, 0);
2653		reg_offset += curr_len;
2654		addr += curr_len;
2655
2656		if (reg_offset < total_len) {
2657			curr_len = min_t(u32, skip_len, total_len - skip_len);
2658			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2659			offset += curr_len;
2660			reg_offset += curr_len;
2661			addr += curr_len;
2662		}
2663	}
2664
2665	return offset;
2666}
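/* Illustrative example: with read_len = 7 and skip_len = 1 (the values used
 * for the RDIF/TDIF debug info registers below), every 8th dword of the
 * dumped range is not read from the chip but zero-filled instead.
 */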
2667
2668	/* Dumps GRC register entries. Returns the dumped size in dwords. */
2669static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2670				     struct qed_ptt *p_ptt,
2671				     struct dbg_array input_regs_arr,
2672				     u32 *dump_buf,
2673				     bool dump,
2674				     enum init_split_types split_type,
2675				     u8 split_id,
2676				     bool block_enable[MAX_BLOCK_ID],
2677				     u32 *num_dumped_reg_entries)
2678{
2679	u32 i, offset = 0, input_offset = 0;
2680	bool mode_match = true;
2681
2682	*num_dumped_reg_entries = 0;
2683
2684	while (input_offset < input_regs_arr.size_in_dwords) {
2685		const struct dbg_dump_cond_hdr *cond_hdr =
2686		    (const struct dbg_dump_cond_hdr *)
2687		    &input_regs_arr.ptr[input_offset++];
2688		u16 modes_buf_offset;
2689		bool eval_mode;
2690
2691		/* Check mode/block */
2692		eval_mode = GET_FIELD(cond_hdr->mode.data,
2693				      DBG_MODE_HDR_EVAL_MODE) > 0;
2694		if (eval_mode) {
2695			modes_buf_offset =
2696				GET_FIELD(cond_hdr->mode.data,
2697					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2698			mode_match = qed_is_mode_match(p_hwfn,
2699						       &modes_buf_offset);
2700		}
2701
2702		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2703			input_offset += cond_hdr->data_size;
2704			continue;
2705		}
2706
2707		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2708			const struct dbg_dump_reg *reg =
2709			    (const struct dbg_dump_reg *)
2710			    &input_regs_arr.ptr[input_offset];
2711			u32 addr, len;
2712			bool wide_bus;
2713
2714			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2715			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2716			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2717			offset += qed_grc_dump_reg_entry(p_hwfn,
2718							 p_ptt,
2719							 dump_buf + offset,
2720							 dump,
2721							 addr,
2722							 len,
2723							 wide_bus,
2724							 split_type, split_id);
2725			(*num_dumped_reg_entries)++;
2726		}
2727	}
2728
2729	return offset;
2730}
2731
2732	/* Dumps GRC register entries for one split. Returns the dumped size in dwords. */
2733static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2734				   struct qed_ptt *p_ptt,
2735				   struct dbg_array input_regs_arr,
2736				   u32 *dump_buf,
2737				   bool dump,
2738				   bool block_enable[MAX_BLOCK_ID],
2739				   enum init_split_types split_type,
2740				   u8 split_id,
2741				   const char *param_name,
2742				   const char *param_val)
2743{
2744	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2745	enum init_split_types hdr_split_type = split_type;
2746	u32 num_dumped_reg_entries, offset;
2747	u8 hdr_split_id = split_id;
2748
2749	/* In PORT_PF split type, print a port split header */
2750	if (split_type == SPLIT_TYPE_PORT_PF) {
2751		hdr_split_type = SPLIT_TYPE_PORT;
2752		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2753	}
2754
2755	/* Calculate register dump header size (and skip it for now) */
2756	offset = qed_grc_dump_regs_hdr(dump_buf,
2757				       false,
2758				       0,
2759				       hdr_split_type,
2760				       hdr_split_id, param_name, param_val);
2761
2762	/* Dump registers */
2763	offset += qed_grc_dump_regs_entries(p_hwfn,
2764					    p_ptt,
2765					    input_regs_arr,
2766					    dump_buf + offset,
2767					    dump,
2768					    split_type,
2769					    split_id,
2770					    block_enable,
2771					    &num_dumped_reg_entries);
2772
2773	/* Write register dump header */
2774	if (dump && num_dumped_reg_entries > 0)
2775		qed_grc_dump_regs_hdr(dump_buf,
2776				      dump,
2777				      num_dumped_reg_entries,
2778				      hdr_split_type,
2779				      hdr_split_id, param_name, param_val);
2780
2781	return num_dumped_reg_entries > 0 ? offset : 0;
2782}
2783
2784/* Dumps registers according to the input registers array. Returns the dumped
2785 * size in dwords.
2786 */
2787static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2788				  struct qed_ptt *p_ptt,
2789				  u32 *dump_buf,
2790				  bool dump,
2791				  bool block_enable[MAX_BLOCK_ID],
2792				  const char *param_name, const char *param_val)
2793{
2794	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2795	u32 offset = 0, input_offset = 0;
2796	u16 fid;
2797	while (input_offset <
2798	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2799		const struct dbg_dump_split_hdr *split_hdr;
2800		struct dbg_array curr_input_regs_arr;
2801		enum init_split_types split_type;
2802		u16 split_count = 0;
2803		u32 split_data_size;
2804		u8 split_id;
2805
2806		split_hdr =
2807			(const struct dbg_dump_split_hdr *)
2808			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2809		split_type =
2810			GET_FIELD(split_hdr->hdr,
2811				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2812		split_data_size =
2813			GET_FIELD(split_hdr->hdr,
2814				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2815		curr_input_regs_arr.ptr =
2816			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2817		curr_input_regs_arr.size_in_dwords = split_data_size;
2818
2819		switch (split_type) {
2820		case SPLIT_TYPE_NONE:
2821			split_count = 1;
2822			break;
2823		case SPLIT_TYPE_PORT:
2824			split_count = dev_data->num_ports;
2825			break;
2826		case SPLIT_TYPE_PF:
2827		case SPLIT_TYPE_PORT_PF:
2828			split_count = dev_data->num_ports *
2829			    dev_data->num_pfs_per_port;
2830			break;
2831		case SPLIT_TYPE_VF:
2832			split_count = dev_data->num_vfs;
2833			break;
2834		default:
2835			return 0;
2836		}
2837
2838		for (split_id = 0; split_id < split_count; split_id++)
2839			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2840							  curr_input_regs_arr,
2841							  dump_buf + offset,
2842							  dump, block_enable,
2843							  split_type,
2844							  split_id,
2845							  param_name,
2846							  param_val);
2847
2848		input_offset += split_data_size;
2849	}
2850
2851	/* Cancel pretends (pretend to original PF) */
2852	if (dump) {
2853		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2854		qed_fid_pretend(p_hwfn, p_ptt, fid);
2855		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2856		dev_data->pretend.split_id = 0;
2857	}
2858
2859	return offset;
2860}
2861
2862/* Dump reset registers. Returns the dumped size in dwords. */
2863static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2864				   struct qed_ptt *p_ptt,
2865				   u32 *dump_buf, bool dump)
2866{
2867	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2868	u32 i, offset = 0, num_regs = 0;
2869
2870	/* Calculate header size */
2871	offset += qed_grc_dump_regs_hdr(dump_buf,
2872					false, 0,
2873					SPLIT_TYPE_NONE, 0, NULL, NULL);
2874
2875	/* Write reset registers */
2876	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2877		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2878			continue;
2879
2880		offset += qed_grc_dump_reg_entry(p_hwfn,
2881						 p_ptt,
2882						 dump_buf + offset,
2883						 dump,
2884						 BYTES_TO_DWORDS
2885						 (s_reset_regs_defs[i].addr), 1,
2886						 false, SPLIT_TYPE_NONE, 0);
2887		num_regs++;
2888	}
2889
2890	/* Write header */
2891	if (dump)
2892		qed_grc_dump_regs_hdr(dump_buf,
2893				      true, num_regs, SPLIT_TYPE_NONE,
2894				      0, NULL, NULL);
2895
2896	return offset;
2897}
2898
2899/* Dump registers that are modified during GRC Dump and therefore must be
2900 * dumped first. Returns the dumped size in dwords.
2901 */
2902static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2903				      struct qed_ptt *p_ptt,
2904				      u32 *dump_buf, bool dump)
2905{
2906	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2907	u32 block_id, offset = 0, num_reg_entries = 0;
2908	const struct dbg_attn_reg *attn_reg_arr;
2909	u8 storm_id, reg_idx, num_attn_regs;
2910
2911	/* Calculate header size */
2912	offset += qed_grc_dump_regs_hdr(dump_buf,
2913					false, 0, SPLIT_TYPE_NONE,
2914					0, NULL, NULL);
2915
2916	/* Write parity registers */
2917	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2918		if (dev_data->block_in_reset[block_id] && dump)
2919			continue;
2920
2921		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2922						       ATTN_TYPE_PARITY,
2923						       &num_attn_regs);
2924
2925		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2926			const struct dbg_attn_reg *reg_data =
2927				&attn_reg_arr[reg_idx];
2928			u16 modes_buf_offset;
2929			bool eval_mode;
2930			u32 addr;
2931
2932			/* Check mode */
2933			eval_mode = GET_FIELD(reg_data->mode.data,
2934					      DBG_MODE_HDR_EVAL_MODE) > 0;
2935			modes_buf_offset =
2936				GET_FIELD(reg_data->mode.data,
2937					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2938			if (eval_mode &&
2939			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2940				continue;
2941
2942			/* Mode match: read & dump registers */
2943			addr = reg_data->mask_address;
2944			offset += qed_grc_dump_reg_entry(p_hwfn,
2945							 p_ptt,
2946							 dump_buf + offset,
2947							 dump,
2948							 addr,
2949							 1, false,
2950							 SPLIT_TYPE_NONE, 0);
2951			addr = GET_FIELD(reg_data->data,
2952					 DBG_ATTN_REG_STS_ADDRESS);
2953			offset += qed_grc_dump_reg_entry(p_hwfn,
2954							 p_ptt,
2955							 dump_buf + offset,
2956							 dump,
2957							 addr,
2958							 1, false,
2959							 SPLIT_TYPE_NONE, 0);
2960			num_reg_entries += 2;
2961		}
2962	}
2963
2963
2964	/* Write Storm stall status registers */
2965	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2966		struct storm_defs *storm = &s_storm_defs[storm_id];
2967		u32 addr;
2968
2969		if (dev_data->block_in_reset[storm->block_id] && dump)
2970			continue;
2971
2972		addr =
2973		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2974				    SEM_FAST_REG_STALLED);
2975		offset += qed_grc_dump_reg_entry(p_hwfn,
2976						 p_ptt,
2977						 dump_buf + offset,
2978						 dump,
2979						 addr,
2980						 1,
2981						 false, SPLIT_TYPE_NONE, 0);
2982		num_reg_entries++;
2983	}
2984
2985	/* Write header */
2986	if (dump)
2987		qed_grc_dump_regs_hdr(dump_buf,
2988				      true,
2989				      num_reg_entries, SPLIT_TYPE_NONE,
2990				      0, NULL, NULL);
2991
2992	return offset;
2993}
2994
2995/* Dumps registers that can't be represented in the debug arrays */
2996static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2997				     struct qed_ptt *p_ptt,
2998				     u32 *dump_buf, bool dump)
2999{
3000	u32 offset = 0, addr;
3001
3002	offset += qed_grc_dump_regs_hdr(dump_buf,
3003					dump, 2, SPLIT_TYPE_NONE, 0,
3004					NULL, NULL);
3005
3006	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
3007	 * skipped).
3008	 */
3009	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
3010	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
3011					      p_ptt,
3012					      dump_buf + offset,
3013					      dump,
3014					      addr,
3015					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
3016					      7,
3017					      1);
3018	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
3019	offset +=
3020	    qed_grc_dump_reg_entry_skip(p_hwfn,
3021					p_ptt,
3022					dump_buf + offset,
3023					dump,
3024					addr,
3025					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
3026					7,
3027					1);
3028
3029	return offset;
3030}
3031
3032/* Dumps a GRC memory header (section and params). Returns the dumped size in
3033 * dwords. The following parameters are dumped:
3034 * - name:	   dumped only if it's not NULL.
3035 * - addr:	   in dwords, dumped only if name is NULL.
3036 * - len:	   in dwords, always dumped.
3037 * - width:	   dumped if it's not zero.
3038 * - packed:	   dumped only if it's not false.
3039 * - mem_group:	   always dumped.
3040 * - is_storm:	   true only if the memory is related to a Storm.
3041 * - storm_letter: valid only if is_storm is true.
3042 *
3043 */
3044static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3045				u32 *dump_buf,
3046				bool dump,
3047				const char *name,
3048				u32 addr,
3049				u32 len,
3050				u32 bit_width,
3051				bool packed,
3052				const char *mem_group,
3053				bool is_storm, char storm_letter)
3054{
3055	u8 num_params = 3;
3056	u32 offset = 0;
3057	char buf[64];
3058
3059	if (!len)
3060		DP_NOTICE(p_hwfn,
3061			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3062
3063	if (bit_width)
3064		num_params++;
3065	if (packed)
3066		num_params++;
3067
3068	/* Dump section header */
3069	offset += qed_dump_section_hdr(dump_buf + offset,
3070				       dump, "grc_mem", num_params);
3071
3072	if (name) {
3073		/* Dump name */
3074		if (is_storm) {
3075			strcpy(buf, "?STORM_");
3076			buf[0] = storm_letter;
3077			strcpy(buf + strlen(buf), name);
3078		} else {
3079			strcpy(buf, name);
3080		}
3081
3082		offset += qed_dump_str_param(dump_buf + offset,
3083					     dump, "name", buf);
3084	} else {
3085		/* Dump address */
3086		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3087
3088		offset += qed_dump_num_param(dump_buf + offset,
3089					     dump, "addr", addr_in_bytes);
3090	}
3091
3092	/* Dump len */
3093	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3094
3095	/* Dump bit width */
3096	if (bit_width)
3097		offset += qed_dump_num_param(dump_buf + offset,
3098					     dump, "width", bit_width);
3099
3100	/* Dump packed */
3101	if (packed)
3102		offset += qed_dump_num_param(dump_buf + offset,
3103					     dump, "packed", 1);
3104
3105	/* Dump reg type */
3106	if (is_storm) {
3107		strcpy(buf, "?STORM_");
3108		buf[0] = storm_letter;
3109		strcpy(buf + strlen(buf), mem_group);
3110	} else {
3111		strcpy(buf, mem_group);
3112	}
3113
3114	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3115
3116	return offset;
3117}
3118
3119/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3120 * Returns the dumped size in dwords.
3121 * The addr and len arguments are specified in dwords.
3122 */
3123static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3124			    struct qed_ptt *p_ptt,
3125			    u32 *dump_buf,
3126			    bool dump,
3127			    const char *name,
3128			    u32 addr,
3129			    u32 len,
3130			    bool wide_bus,
3131			    u32 bit_width,
3132			    bool packed,
3133			    const char *mem_group,
3134			    bool is_storm, char storm_letter)
3135{
3136	u32 offset = 0;
3137
3138	offset += qed_grc_dump_mem_hdr(p_hwfn,
3139				       dump_buf + offset,
3140				       dump,
3141				       name,
3142				       addr,
3143				       len,
3144				       bit_width,
3145				       packed,
3146				       mem_group, is_storm, storm_letter);
3147	offset += qed_grc_dump_addr_range(p_hwfn,
3148					  p_ptt,
3149					  dump_buf + offset,
3150					  dump, addr, len, wide_bus,
3151					  SPLIT_TYPE_NONE, 0);
3152
3153	return offset;
3154}
3155
3156/* Dumps GRC memories entries. Returns the dumped size in dwords. */
3157static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3158				    struct qed_ptt *p_ptt,
3159				    struct dbg_array input_mems_arr,
3160				    u32 *dump_buf, bool dump)
3161{
3162	u32 i, offset = 0, input_offset = 0;
3163	bool mode_match = true;
3164
3165	while (input_offset < input_mems_arr.size_in_dwords) {
3166		const struct dbg_dump_cond_hdr *cond_hdr;
3167		u16 modes_buf_offset;
3168		u32 num_entries;
3169		bool eval_mode;
3170
3171		cond_hdr = (const struct dbg_dump_cond_hdr *)
3172			   &input_mems_arr.ptr[input_offset++];
3173		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3174
3175		/* Check required mode */
3176		eval_mode = GET_FIELD(cond_hdr->mode.data,
3177				      DBG_MODE_HDR_EVAL_MODE) > 0;
3178		if (eval_mode) {
3179			modes_buf_offset =
3180				GET_FIELD(cond_hdr->mode.data,
3181					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3182			mode_match = qed_is_mode_match(p_hwfn,
3183						       &modes_buf_offset);
3184		}
3185
3186		if (!mode_match) {
3187			input_offset += cond_hdr->data_size;
3188			continue;
3189		}
3190
3191		for (i = 0; i < num_entries;
3192		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3193			const struct dbg_dump_mem *mem =
3194				(const struct dbg_dump_mem *)
3195				&input_mems_arr.ptr[input_offset];
3196			u8 mem_group_id = GET_FIELD(mem->dword0,
3197						    DBG_DUMP_MEM_MEM_GROUP_ID);
3198			bool is_storm = false, mem_wide_bus;
3199			enum dbg_grc_params grc_param;
3200			char storm_letter = 'a';
3201			enum block_id block_id;
3202			u32 mem_addr, mem_len;
3203
3204			if (mem_group_id >= MEM_GROUPS_NUM) {
3205				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3206				return 0;
3207			}
3208
3209			block_id = (enum block_id)cond_hdr->block_id;
3210			if (!qed_grc_is_mem_included(p_hwfn,
3211						     block_id,
3212						     mem_group_id))
3213				continue;
3214
3215			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3216			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3217			mem_wide_bus = GET_FIELD(mem->dword1,
3218						 DBG_DUMP_MEM_WIDE_BUS);
3219
3220			/* Update memory length for CCFC/TCFC memories
3221			 * according to number of LCIDs/LTIDs.
3222			 */
3223			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3224				if (mem_len % MAX_LCIDS) {
3225					DP_NOTICE(p_hwfn,
3226						  "Invalid CCFC connection memory size\n");
3227					return 0;
3228				}
3229
3230				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3231				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3232					  (mem_len / MAX_LCIDS);
3233			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3234				if (mem_len % MAX_LTIDS) {
3235					DP_NOTICE(p_hwfn,
3236						  "Invalid TCFC task memory size\n");
3237					return 0;
3238				}
3239
3240				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3241				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3242					  (mem_len / MAX_LTIDS);
3243			}
3244
3245			/* If memory is associated with Storm, update Storm
3246			 * details.
3247			 */
3248			if (s_block_defs
3249			    [cond_hdr->block_id]->associated_to_storm) {
3250				is_storm = true;
3251				storm_letter =
3252				    s_storm_defs[s_block_defs
3253						 [cond_hdr->block_id]->
3254						 storm_id].letter;
3255			}
3256
3257			/* Dump memory */
3258			offset += qed_grc_dump_mem(p_hwfn,
3259						p_ptt,
3260						dump_buf + offset,
3261						dump,
3262						NULL,
3263						mem_addr,
3264						mem_len,
3265						mem_wide_bus,
3266						0,
3267						false,
3268						s_mem_group_names[mem_group_id],
3269						is_storm,
3270						storm_letter);
3271		}
3272	}
3273
3274	return offset;
3275}
3276
3277/* Dumps GRC memories according to the input array dump_mem.
3278 * Returns the dumped size in dwords.
3279 */
3280static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3281				 struct qed_ptt *p_ptt,
3282				 u32 *dump_buf, bool dump)
3283{
3284	u32 offset = 0, input_offset = 0;
3285
3286	while (input_offset <
3287	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3288		const struct dbg_dump_split_hdr *split_hdr;
3289		struct dbg_array curr_input_mems_arr;
3290		enum init_split_types split_type;
3291		u32 split_data_size;
3292
3293		split_hdr = (const struct dbg_dump_split_hdr *)
3294			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3295		split_type =
3296			GET_FIELD(split_hdr->hdr,
3297				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3298		split_data_size =
3299			GET_FIELD(split_hdr->hdr,
3300				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3301		curr_input_mems_arr.ptr =
3302			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3303		curr_input_mems_arr.size_in_dwords = split_data_size;
3304
3305		if (split_type == SPLIT_TYPE_NONE)
3306			offset += qed_grc_dump_mem_entries(p_hwfn,
3307							   p_ptt,
3308							   curr_input_mems_arr,
3309							   dump_buf + offset,
3310							   dump);
3311		else
3312			DP_NOTICE(p_hwfn,
3313				  "Dumping split memories is currently not supported\n");
3314
3315		input_offset += split_data_size;
3316	}
3317
3318	return offset;
3319}
3320
3321/* Dumps GRC context data for the specified Storm.
3322 * Returns the dumped size in dwords.
3323 * The lid_size argument is specified in quad-regs.
3324 */
3325static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3326				 struct qed_ptt *p_ptt,
3327				 u32 *dump_buf,
3328				 bool dump,
3329				 const char *name,
3330				 u32 num_lids,
3331				 u32 lid_size,
3332				 u32 rd_reg_addr,
3333				 u8 storm_id)
3334{
3335	struct storm_defs *storm = &s_storm_defs[storm_id];
3336	u32 i, lid, total_size, offset = 0;
3337
3338	if (!lid_size)
3339		return 0;
3340
3341	lid_size *= BYTES_IN_DWORD;
3342	total_size = num_lids * lid_size;
3343
3344	offset += qed_grc_dump_mem_hdr(p_hwfn,
3345				       dump_buf + offset,
3346				       dump,
3347				       name,
3348				       0,
3349				       total_size,
3350				       lid_size * 32,
3351				       false, name, true, storm->letter);
3352
3353	if (!dump)
3354		return offset + total_size;
3355
3356	/* Dump context data */
3357	for (lid = 0; lid < num_lids; lid++) {
3358		for (i = 0; i < lid_size; i++, offset++) {
3359			qed_wr(p_hwfn,
3360			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3361			*(dump_buf + offset) = qed_rd(p_hwfn,
3362						      p_ptt, rd_reg_addr);
3363		}
3364	}
3365
3366	return offset;
3367}
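/* Note on the loop above: each iteration writes (i << 9) | lid to the Storm's
 * CM context write address, selecting dword i of the given lid, and then
 * reads that dword back through rd_reg_addr into the dump buffer.
 */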
3368
3369/* Dumps GRC contexts. Returns the dumped size in dwords. */
3370static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3371			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3372{
3373	enum dbg_grc_params grc_param;
3374	u32 offset = 0;
3375	u8 storm_id;
3376
3377	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3378		struct storm_defs *storm = &s_storm_defs[storm_id];
3379
3380		if (!qed_grc_is_storm_included(p_hwfn,
3381					       (enum dbg_storms)storm_id))
3382			continue;
3383
3384		/* Dump Conn AG context size */
3385		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3386		offset +=
3387			qed_grc_dump_ctx_data(p_hwfn,
3388					      p_ptt,
3389					      dump_buf + offset,
3390					      dump,
3391					      "CONN_AG_CTX",
3392					      qed_grc_get_param(p_hwfn,
3393								grc_param),
3394					      storm->cm_conn_ag_ctx_lid_size,
3395					      storm->cm_conn_ag_ctx_rd_addr,
3396					      storm_id);
3397
3398		/* Dump Conn ST context size */
3399		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3400		offset +=
3401			qed_grc_dump_ctx_data(p_hwfn,
3402					      p_ptt,
3403					      dump_buf + offset,
3404					      dump,
3405					      "CONN_ST_CTX",
3406					      qed_grc_get_param(p_hwfn,
3407								grc_param),
3408					      storm->cm_conn_st_ctx_lid_size,
3409					      storm->cm_conn_st_ctx_rd_addr,
3410					      storm_id);
3411
3412		/* Dump Task AG context size */
3413		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3414		offset +=
3415			qed_grc_dump_ctx_data(p_hwfn,
3416					      p_ptt,
3417					      dump_buf + offset,
3418					      dump,
3419					      "TASK_AG_CTX",
3420					      qed_grc_get_param(p_hwfn,
3421								grc_param),
3422					      storm->cm_task_ag_ctx_lid_size,
3423					      storm->cm_task_ag_ctx_rd_addr,
3424					      storm_id);
3425
3426		/* Dump Task ST context size */
3427		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3428		offset +=
3429			qed_grc_dump_ctx_data(p_hwfn,
3430					      p_ptt,
3431					      dump_buf + offset,
3432					      dump,
3433					      "TASK_ST_CTX",
3434					      qed_grc_get_param(p_hwfn,
3435								grc_param),
3436					      storm->cm_task_st_ctx_lid_size,
3437					      storm->cm_task_st_ctx_rd_addr,
3438					      storm_id);
3439	}
3440
3441	return offset;
3442}
3443
3444/* Dumps GRC IORs data. Returns the dumped size in dwords. */
3445static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3446			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3447{
3448	char buf[10] = "IOR_SET_?";
3449	u32 addr, offset = 0;
3450	u8 storm_id, set_id;
3451
3452	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3453		struct storm_defs *storm = &s_storm_defs[storm_id];
3454
3455		if (!qed_grc_is_storm_included(p_hwfn,
3456					       (enum dbg_storms)storm_id))
3457			continue;
3458
3459		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3460			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3461					       SEM_FAST_REG_STORM_REG_FILE) +
3462			       IOR_SET_OFFSET(set_id);
3463			if (strlen(buf) > 0)
3464				buf[strlen(buf) - 1] = '0' + set_id;
3465			offset += qed_grc_dump_mem(p_hwfn,
3466						   p_ptt,
3467						   dump_buf + offset,
3468						   dump,
3469						   buf,
3470						   addr,
3471						   IORS_PER_SET,
3472						   false,
3473						   32,
3474						   false,
3475						   "ior",
3476						   true,
3477						   storm->letter);
3478		}
3479	}
3480
3481	return offset;
3482}
3483
3484/* Dump VFC CAM. Returns the dumped size in dwords. */
3485static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3486				struct qed_ptt *p_ptt,
3487				u32 *dump_buf, bool dump, u8 storm_id)
3488{
3489	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3490	struct storm_defs *storm = &s_storm_defs[storm_id];
3491	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3492	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3493	u32 row, i, offset = 0;
3494
3495	offset += qed_grc_dump_mem_hdr(p_hwfn,
3496				       dump_buf + offset,
3497				       dump,
3498				       "vfc_cam",
3499				       0,
3500				       total_size,
3501				       256,
3502				       false, "vfc_cam", true, storm->letter);
3503
3504	if (!dump)
3505		return offset + total_size;
3506
3507	/* Prepare CAM address */
3508	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3509
3510	for (row = 0; row < VFC_CAM_NUM_ROWS;
3511	     row++, offset += VFC_CAM_RESP_DWORDS) {
3512		/* Write VFC CAM command */
3513		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3514		ARR_REG_WR(p_hwfn,
3515			   p_ptt,
3516			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3517			   cam_cmd, VFC_CAM_CMD_DWORDS);
3518
3519		/* Write VFC CAM address */
3520		ARR_REG_WR(p_hwfn,
3521			   p_ptt,
3522			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3523			   cam_addr, VFC_CAM_ADDR_DWORDS);
3524
3525		/* Read VFC CAM read response */
3526		ARR_REG_RD(p_hwfn,
3527			   p_ptt,
3528			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3529			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3530	}
3531
3532	return offset;
3533}
3534
3535/* Dump VFC RAM. Returns the dumped size in dwords. */
3536static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3537				struct qed_ptt *p_ptt,
3538				u32 *dump_buf,
3539				bool dump,
3540				u8 storm_id, struct vfc_ram_defs *ram_defs)
3541{
3542	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3543	struct storm_defs *storm = &s_storm_defs[storm_id];
3544	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3545	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3546	u32 row, i, offset = 0;
3547
3548	offset += qed_grc_dump_mem_hdr(p_hwfn,
3549				       dump_buf + offset,
3550				       dump,
3551				       ram_defs->mem_name,
3552				       0,
3553				       total_size,
3554				       256,
3555				       false,
3556				       ram_defs->type_name,
3557				       true, storm->letter);
3558
3559	/* Prepare RAM address */
3560	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3561
3562	if (!dump)
3563		return offset + total_size;
3564
3565	for (row = ram_defs->base_row;
3566	     row < ram_defs->base_row + ram_defs->num_rows;
3567	     row++, offset += VFC_RAM_RESP_DWORDS) {
3568		/* Write VFC RAM command */
3569		ARR_REG_WR(p_hwfn,
3570			   p_ptt,
3571			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3572			   ram_cmd, VFC_RAM_CMD_DWORDS);
3573
3574		/* Write VFC RAM address */
3575		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3576		ARR_REG_WR(p_hwfn,
3577			   p_ptt,
3578			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3579			   ram_addr, VFC_RAM_ADDR_DWORDS);
3580
3581		/* Read VFC RAM read response */
3582		ARR_REG_RD(p_hwfn,
3583			   p_ptt,
3584			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3585			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3586	}
3587
3588	return offset;
3589}
3590
3591/* Dumps GRC VFC data. Returns the dumped size in dwords. */
3592static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3593			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3594{
3595	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3596	u8 storm_id, i;
3597	u32 offset = 0;
3598
3599	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3600		if (!qed_grc_is_storm_included(p_hwfn,
3601					       (enum dbg_storms)storm_id) ||
3602		    !s_storm_defs[storm_id].has_vfc ||
3603		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3604		     PLATFORM_ASIC))
3605			continue;
3606
3607		/* Read CAM */
3608		offset += qed_grc_dump_vfc_cam(p_hwfn,
3609					       p_ptt,
3610					       dump_buf + offset,
3611					       dump, storm_id);
3612
3613		/* Read RAM */
3614		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3615			offset += qed_grc_dump_vfc_ram(p_hwfn,
3616						       p_ptt,
3617						       dump_buf + offset,
3618						       dump,
3619						       storm_id,
3620						       &s_vfc_ram_defs[i]);
3621	}
3622
3623	return offset;
3624}
3625
3626/* Dumps GRC RSS data. Returns the dumped size in dwords. */
3627static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3628			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3629{
3630	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3631	u32 offset = 0;
3632	u8 rss_mem_id;
3633
3634	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3635		u32 rss_addr, num_entries, total_dwords;
3636		struct rss_mem_defs *rss_defs;
3637		u32 addr, num_dwords_to_read;
3638		bool packed;
3639
3640		rss_defs = &s_rss_mem_defs[rss_mem_id];
3641		rss_addr = rss_defs->addr;
3642		num_entries = rss_defs->num_entries[dev_data->chip_id];
3643		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3644		packed = (rss_defs->entry_width == 16);
3645
3646		offset += qed_grc_dump_mem_hdr(p_hwfn,
3647					       dump_buf + offset,
3648					       dump,
3649					       rss_defs->mem_name,
3650					       0,
3651					       total_dwords,
3652					       rss_defs->entry_width,
3653					       packed,
3654					       rss_defs->type_name, false, 0);
3655
3656		/* Dump RSS data */
3657		if (!dump) {
3658			offset += total_dwords;
3659			continue;
3660		}
3661
3662		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3663		while (total_dwords) {
3664			num_dwords_to_read = min_t(u32,
3665						   RSS_REG_RSS_RAM_DATA_SIZE,
3666						   total_dwords);
3667			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3668			offset += qed_grc_dump_addr_range(p_hwfn,
3669							  p_ptt,
3670							  dump_buf + offset,
3671							  dump,
3672							  addr,
3673							  num_dwords_to_read,
3674							  false,
3675							  SPLIT_TYPE_NONE, 0);
3676			total_dwords -= num_dwords_to_read;
3677			rss_addr++;
3678		}
3679	}
3680
3681	return offset;
3682}
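/* Worked example of the sizing above (illustrative numbers only): an RSS
 * memory with 1024 entries of 16 bits each occupies (1024 * 16) / 32 = 512
 * dwords and is marked "packed" because two entries share every dword; with
 * a 32-bit entry width each entry gets its own dword and packed == false.
 */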
3683
3684/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3685static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3686				struct qed_ptt *p_ptt,
3687				u32 *dump_buf, bool dump, u8 big_ram_id)
3688{
3689	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3690	u32 block_size, ram_size, offset = 0, reg_val, i;
3691	char mem_name[12] = "???_BIG_RAM";
3692	char type_name[8] = "???_RAM";
3693	struct big_ram_defs *big_ram;
3694
3695	big_ram = &s_big_ram_defs[big_ram_id];
3696	ram_size = big_ram->ram_size[dev_data->chip_id];
3697
3698	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3699	block_size = reg_val &
3700		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3701									 : 128;
3702
3703	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3704	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3705
3706	/* Dump memory header */
3707	offset += qed_grc_dump_mem_hdr(p_hwfn,
3708				       dump_buf + offset,
3709				       dump,
3710				       mem_name,
3711				       0,
3712				       ram_size,
3713				       block_size * 8,
3714				       false, type_name, false, 0);
3715
3716	/* Read and dump Big RAM data */
3717	if (!dump)
3718		return offset + ram_size;
3719
3720	/* Dump Big RAM */
3721	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3722	     i++) {
3723		u32 addr, len;
3724
3725		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3726		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3727		len = BRB_REG_BIG_RAM_DATA_SIZE;
3728		offset += qed_grc_dump_addr_range(p_hwfn,
3729						  p_ptt,
3730						  dump_buf + offset,
3731						  dump,
3732						  addr,
3733						  len,
3734						  false, SPLIT_TYPE_NONE, 0);
3735	}
3736
3737	return offset;
3738}
3739
/* Dumps GRC MCP data. Returns the dumped size in dwords. */
3740static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3741			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3742{
3743	bool block_enable[MAX_BLOCK_ID] = { 0 };
3744	u32 offset = 0, addr;
3745	bool halted = false;
3746
3747	/* Halt MCP */
3748	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3749		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3750		if (!halted)
3751			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3752	}
3753
3754	/* Dump MCP scratchpad */
3755	offset += qed_grc_dump_mem(p_hwfn,
3756				   p_ptt,
3757				   dump_buf + offset,
3758				   dump,
3759				   NULL,
3760				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3761				   MCP_REG_SCRATCH_SIZE_BB_K2,
3762				   false, 0, false, "MCP", false, 0);
3763
3764	/* Dump MCP cpu_reg_file */
3765	offset += qed_grc_dump_mem(p_hwfn,
3766				   p_ptt,
3767				   dump_buf + offset,
3768				   dump,
3769				   NULL,
3770				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3771				   MCP_REG_CPU_REG_FILE_SIZE,
3772				   false, 0, false, "MCP", false, 0);
3773
3774	/* Dump MCP registers */
3775	block_enable[BLOCK_MCP] = true;
3776	offset += qed_grc_dump_registers(p_hwfn,
3777					 p_ptt,
3778					 dump_buf + offset,
3779					 dump, block_enable, "block", "MCP");
3780
3781	/* Dump required non-MCP registers */
3782	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3783					dump, 1, SPLIT_TYPE_NONE, 0,
3784					"block", "MCP");
3785	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3786	offset += qed_grc_dump_reg_entry(p_hwfn,
3787					 p_ptt,
3788					 dump_buf + offset,
3789					 dump,
3790					 addr,
3791					 1,
3792					 false, SPLIT_TYPE_NONE, 0);
3793
3794	/* Release MCP */
3795	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3796		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3797
3798	return offset;
3799}
3800
3801/* Dumps the tbus indirect memory for all PHYs. */
3802static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3803			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3804{
3805	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3806	char mem_name[32];
3807	u8 phy_id;
3808
3809	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3810		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3811		struct phy_defs *phy_defs;
3812		u8 *bytes_buf;
3813
3814		phy_defs = &s_phy_defs[phy_id];
3815		addr_lo_addr = phy_defs->base_addr +
3816			       phy_defs->tbus_addr_lo_addr;
3817		addr_hi_addr = phy_defs->base_addr +
3818			       phy_defs->tbus_addr_hi_addr;
3819		data_lo_addr = phy_defs->base_addr +
3820			       phy_defs->tbus_data_lo_addr;
3821		data_hi_addr = phy_defs->base_addr +
3822			       phy_defs->tbus_data_hi_addr;
3823
3824		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3825			     phy_defs->phy_name) < 0)
3826			DP_NOTICE(p_hwfn,
3827				  "Unexpected debug error: invalid PHY memory name\n");
3828
3829		offset += qed_grc_dump_mem_hdr(p_hwfn,
3830					       dump_buf + offset,
3831					       dump,
3832					       mem_name,
3833					       0,
3834					       PHY_DUMP_SIZE_DWORDS,
3835					       16, true, mem_name, false, 0);
3836
3837		if (!dump) {
3838			offset += PHY_DUMP_SIZE_DWORDS;
3839			continue;
3840		}
3841
3842		bytes_buf = (u8 *)(dump_buf + offset);
3843		for (tbus_hi_offset = 0;
3844		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3845		     tbus_hi_offset++) {
3846			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3847			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3848			     tbus_lo_offset++) {
3849				qed_wr(p_hwfn,
3850				       p_ptt, addr_lo_addr, tbus_lo_offset);
3851				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3852							    p_ptt,
3853							    data_lo_addr);
3854				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3855							    p_ptt,
3856							    data_hi_addr);
3857			}
3858		}
3859
3860		offset += PHY_DUMP_SIZE_DWORDS;
3861	}
3862
3863	return offset;
3864}
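/* Addressing note for the tbus walk above: each tbus address is split into a
 * high byte (written to tbus_addr_hi) and a low byte (tbus_addr_lo), and every
 * (hi, lo) pair yields two dumped bytes, one from tbus_data_lo and one from
 * tbus_data_hi. The loops therefore produce NUM_PHY_TBUS_ADDRESSES * 2 bytes,
 * i.e. NUM_PHY_TBUS_ADDRESSES / 2 dwords, per PHY, which is what the
 * PHY_DUMP_SIZE_DWORDS accounting assumes.
 */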
3865
3866static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3867				struct qed_ptt *p_ptt,
3868				enum block_id block_id,
3869				u8 line_id,
3870				u8 enable_mask,
3871				u8 right_shift,
3872				u8 force_valid_mask, u8 force_frame_mask)
3873{
3874	struct block_defs *block = s_block_defs[block_id];
3875
3876	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3877	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3878	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3879	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3880	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3881}
3882
3883/* Dumps Static Debug data. Returns the dumped size in dwords. */
3884static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3885				     struct qed_ptt *p_ptt,
3886				     u32 *dump_buf, bool dump)
3887{
3888	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3889	u32 block_id, line_id, offset = 0;
3890
3891	/* Don't dump static debug if a debug bus recording is in progress */
3892	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3893		return 0;
3894
3895	if (dump) {
3896		/* Disable all blocks debug output */
3897		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3898			struct block_defs *block = s_block_defs[block_id];
3899
3900			if (block->dbg_client_id[dev_data->chip_id] !=
3901			    MAX_DBG_BUS_CLIENTS)
3902				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3903				       0);
3904		}
3905
3906		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3907		qed_bus_set_framing_mode(p_hwfn,
3908					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3909		qed_wr(p_hwfn,
3910		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3911		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3912		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3913	}
3914
3915	/* Dump all static debug lines for each relevant block */
3916	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3917		struct block_defs *block = s_block_defs[block_id];
3918		struct dbg_bus_block *block_desc;
3919		u32 block_dwords, addr, len;
3920		u8 dbg_client_id;
3921
3922		if (block->dbg_client_id[dev_data->chip_id] ==
3923		    MAX_DBG_BUS_CLIENTS)
3924			continue;
3925
3926		block_desc = get_dbg_bus_block_desc(p_hwfn,
3927						    (enum block_id)block_id);
3928		block_dwords = NUM_DBG_LINES(block_desc) *
3929			       STATIC_DEBUG_LINE_DWORDS;
3930
3931		/* Dump static section params */
3932		offset += qed_grc_dump_mem_hdr(p_hwfn,
3933					       dump_buf + offset,
3934					       dump,
3935					       block->name,
3936					       0,
3937					       block_dwords,
3938					       32, false, "STATIC", false, 0);
3939
3940		if (!dump) {
3941			offset += block_dwords;
3942			continue;
3943		}
3944
3945		/* If all lines are invalid - dump zeros */
3946		if (dev_data->block_in_reset[block_id]) {
3947			memset(dump_buf + offset, 0,
3948			       DWORDS_TO_BYTES(block_dwords));
3949			offset += block_dwords;
3950			continue;
3951		}
3952
3953		/* Enable block's client */
3954		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3955		qed_bus_enable_clients(p_hwfn,
3956				       p_ptt,
3957				       BIT(dbg_client_id));
3958
3959		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3960		len = STATIC_DEBUG_LINE_DWORDS;
3961		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3962		     line_id++) {
3963			/* Configure debug line ID */
3964			qed_config_dbg_line(p_hwfn,
3965					    p_ptt,
3966					    (enum block_id)block_id,
3967					    (u8)line_id, 0xf, 0, 0, 0);
3968
3969			/* Read debug line info */
3970			offset += qed_grc_dump_addr_range(p_hwfn,
3971							  p_ptt,
3972							  dump_buf + offset,
3973							  dump,
3974							  addr,
3975							  len,
3976							  true, SPLIT_TYPE_NONE,
3977							  0);
3978		}
3979
3980		/* Disable block's client and debug output */
3981		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3982		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3983	}
3984
3985	if (dump) {
3986		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3987		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3988	}
3989
3990	return offset;
3991}
3992
3993/* Performs GRC Dump to the specified buffer.
3994 * Returns the dumped size in dwords.
3995 */
3996static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3997				    struct qed_ptt *p_ptt,
3998				    u32 *dump_buf,
3999				    bool dump, u32 *num_dumped_dwords)
4000{
4001	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4002	bool parities_masked = false;
4003	u32 offset = 0;
4004	u8 i;
4005
4006	*num_dumped_dwords = 0;
4007	dev_data->num_regs_read = 0;
4008
4009	/* Update reset state */
4010	if (dump)
4011		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4012
4013	/* Dump global params */
4014	offset += qed_dump_common_global_params(p_hwfn,
4015						p_ptt,
4016						dump_buf + offset, dump, 4);
4017	offset += qed_dump_str_param(dump_buf + offset,
4018				     dump, "dump-type", "grc-dump");
4019	offset += qed_dump_num_param(dump_buf + offset,
4020				     dump,
4021				     "num-lcids",
4022				     qed_grc_get_param(p_hwfn,
4023						DBG_GRC_PARAM_NUM_LCIDS));
4024	offset += qed_dump_num_param(dump_buf + offset,
4025				     dump,
4026				     "num-ltids",
4027				     qed_grc_get_param(p_hwfn,
4028						DBG_GRC_PARAM_NUM_LTIDS));
4029	offset += qed_dump_num_param(dump_buf + offset,
4030				     dump, "num-ports", dev_data->num_ports);
4031
4032	/* Dump reset registers (dumped before taking blocks out of reset) */
4033	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4034		offset += qed_grc_dump_reset_regs(p_hwfn,
4035						  p_ptt,
4036						  dump_buf + offset, dump);
4037
4038	/* Take all blocks out of reset (using reset registers) */
4039	if (dump) {
4040		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4041		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4042	}
4043
4044	/* Disable all parities using MFW command */
4045	if (dump &&
4046	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4047		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4048		if (!parities_masked) {
4049			DP_NOTICE(p_hwfn,
4050				  "Failed to mask parities using MFW\n");
4051			if (qed_grc_get_param
4052			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4053				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4054		}
4055	}
4056
4057	/* Dump modified registers (dumped before modifying them) */
4058	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4059		offset += qed_grc_dump_modified_regs(p_hwfn,
4060						     p_ptt,
4061						     dump_buf + offset, dump);
4062
4063	/* Stall storms */
4064	if (dump &&
4065	    (qed_grc_is_included(p_hwfn,
4066				 DBG_GRC_PARAM_DUMP_IOR) ||
4067	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4068		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4069
4070	/* Dump all regs */
4071	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4072		bool block_enable[MAX_BLOCK_ID];
4073
4074		/* Dump all blocks except MCP */
4075		for (i = 0; i < MAX_BLOCK_ID; i++)
4076			block_enable[i] = true;
4077		block_enable[BLOCK_MCP] = false;
4078		offset += qed_grc_dump_registers(p_hwfn,
4079						 p_ptt,
4080						 dump_buf +
4081						 offset,
4082						 dump,
4083						 block_enable, NULL, NULL);
4084
4085		/* Dump special registers */
4086		offset += qed_grc_dump_special_regs(p_hwfn,
4087						    p_ptt,
4088						    dump_buf + offset, dump);
4089	}
4090
4091	/* Dump memories */
4092	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4093
4094	/* Dump MCP */
4095	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4096		offset += qed_grc_dump_mcp(p_hwfn,
4097					   p_ptt, dump_buf + offset, dump);
4098
4099	/* Dump context */
4100	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4101		offset += qed_grc_dump_ctx(p_hwfn,
4102					   p_ptt, dump_buf + offset, dump);
4103
4104	/* Dump RSS memories */
4105	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4106		offset += qed_grc_dump_rss(p_hwfn,
4107					   p_ptt, dump_buf + offset, dump);
4108
4109	/* Dump Big RAM */
4110	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4111		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4112			offset += qed_grc_dump_big_ram(p_hwfn,
4113						       p_ptt,
4114						       dump_buf + offset,
4115						       dump, i);
4116
4117	/* Dump IORs */
4118	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4119		offset += qed_grc_dump_iors(p_hwfn,
4120					    p_ptt, dump_buf + offset, dump);
4121
4122	/* Dump VFC */
4123	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4124		offset += qed_grc_dump_vfc(p_hwfn,
4125					   p_ptt, dump_buf + offset, dump);
4126
4127	/* Dump PHY tbus */
4128	if (qed_grc_is_included(p_hwfn,
4129				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4130	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4131		offset += qed_grc_dump_phy(p_hwfn,
4132					   p_ptt, dump_buf + offset, dump);
4133
4134	/* Dump static debug data (only if not during debug bus recording) */
4135	if (qed_grc_is_included(p_hwfn,
4136				DBG_GRC_PARAM_DUMP_STATIC) &&
4137	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
4138		offset += qed_grc_dump_static_debug(p_hwfn,
4139						    p_ptt,
4140						    dump_buf + offset, dump);
4141
4142	/* Dump last section */
4143	offset += qed_dump_last_section(dump_buf, offset, dump);
4144
4145	if (dump) {
4146		/* Unstall storms */
4147		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4148			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4149
4150		/* Clear parity status */
4151		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4152
4153		/* Enable all parities using MFW command */
4154		if (parities_masked)
4155			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4156	}
4157
4158	*num_dumped_dwords = offset;
4159
4160	return DBG_STATUS_OK;
4161}
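/* A hedged sketch of how the routine above is driven by the public API later
 * in this file: it is called once with dump == false to learn the required
 * size, and once with dump == true to fill the caller's buffer (error
 * handling and allocation omitted; 'buf' is an illustrative name):
 *
 *	u32 size, written;
 *
 *	qed_grc_dump(p_hwfn, p_ptt, NULL, false, &size);
 *	... caller allocates 'buf' with room for 'size' dwords ...
 *	qed_grc_dump(p_hwfn, p_ptt, buf, true, &written);
 */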
4162
4163/* Writes the specified failing Idle Check rule to the specified buffer.
4164 * Returns the dumped size in dwords.
4165 */
4166static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4167				     struct qed_ptt *p_ptt,
4168				     u32 *
4169				     dump_buf,
4170				     bool dump,
4171				     u16 rule_id,
4172				     const struct dbg_idle_chk_rule *rule,
4173				     u16 fail_entry_id, u32 *cond_reg_values)
4174{
4175	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4176	const struct dbg_idle_chk_cond_reg *cond_regs;
4177	const struct dbg_idle_chk_info_reg *info_regs;
4178	u32 i, next_reg_offset = 0, offset = 0;
4179	struct dbg_idle_chk_result_hdr *hdr;
4180	const union dbg_idle_chk_reg *regs;
4181	u8 reg_id;
4182
4183	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4184	regs = &((const union dbg_idle_chk_reg *)
4185		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4186	cond_regs = &regs[0].cond_reg;
4187	info_regs = &regs[rule->num_cond_regs].info_reg;
4188
4189	/* Dump rule data */
4190	if (dump) {
4191		memset(hdr, 0, sizeof(*hdr));
4192		hdr->rule_id = rule_id;
4193		hdr->mem_entry_id = fail_entry_id;
4194		hdr->severity = rule->severity;
4195		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4196	}
4197
4198	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4199
4200	/* Dump condition register values */
4201	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4202		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4203		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4204
4205		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4206			  (dump_buf + offset);
4207
4208		/* Write register header */
4209		if (!dump) {
4210			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4211			    reg->entry_size;
4212			continue;
4213		}
4214
4215		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4216		memset(reg_hdr, 0, sizeof(*reg_hdr));
4217		reg_hdr->start_entry = reg->start_entry;
4218		reg_hdr->size = reg->entry_size;
4219		SET_FIELD(reg_hdr->data,
4220			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4221			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4222		SET_FIELD(reg_hdr->data,
4223			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4224
4225		/* Write register values */
4226		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4227			dump_buf[offset] = cond_reg_values[next_reg_offset];
4228	}
4229
4230	/* Dump info register values */
4231	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4232		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4233		u32 block_id;
4234
4235		/* Check if register's block is in reset */
4236		if (!dump) {
4237			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4238			continue;
4239		}
4240
4241		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4242		if (block_id >= MAX_BLOCK_ID) {
4243			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4244			return 0;
4245		}
4246
4247		if (!dev_data->block_in_reset[block_id]) {
4248			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4249			bool wide_bus, eval_mode, mode_match = true;
4250			u16 modes_buf_offset;
4251			u32 addr;
4252
4253			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4254				  (dump_buf + offset);
4255
4256			/* Check mode */
4257			eval_mode = GET_FIELD(reg->mode.data,
4258					      DBG_MODE_HDR_EVAL_MODE) > 0;
4259			if (eval_mode) {
4260				modes_buf_offset =
4261				    GET_FIELD(reg->mode.data,
4262					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4263				mode_match =
4264					qed_is_mode_match(p_hwfn,
4265							  &modes_buf_offset);
4266			}
4267
4268			if (!mode_match)
4269				continue;
4270
4271			addr = GET_FIELD(reg->data,
4272					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4273			wide_bus = GET_FIELD(reg->data,
4274					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4275
4276			/* Write register header */
4277			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4278			hdr->num_dumped_info_regs++;
4279			memset(reg_hdr, 0, sizeof(*reg_hdr));
4280			reg_hdr->size = reg->size;
4281			SET_FIELD(reg_hdr->data,
4282				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4283				  rule->num_cond_regs + reg_id);
4284
4285			/* Write register values */
4286			offset += qed_grc_dump_addr_range(p_hwfn,
4287							  p_ptt,
4288							  dump_buf + offset,
4289							  dump,
4290							  addr,
4291							  reg->size, wide_bus,
4292							  SPLIT_TYPE_NONE, 0);
4293		}
4294	}
4295
4296	return offset;
4297}
4298
4299/* Dumps idle check rule entries. Returns the dumped size in dwords. */
4300static u32
4301qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4302			       u32 *dump_buf, bool dump,
4303			       const struct dbg_idle_chk_rule *input_rules,
4304			       u32 num_input_rules, u32 *num_failing_rules)
4305{
4306	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4307	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4308	u32 i, offset = 0;
4309	u16 entry_id;
4310	u8 reg_id;
4311
4312	*num_failing_rules = 0;
4313
4314	for (i = 0; i < num_input_rules; i++) {
4315		const struct dbg_idle_chk_cond_reg *cond_regs;
4316		const struct dbg_idle_chk_rule *rule;
4317		const union dbg_idle_chk_reg *regs;
4318		u16 num_reg_entries = 1;
4319		bool check_rule = true;
4320		const u32 *imm_values;
4321
4322		rule = &input_rules[i];
4323		regs = &((const union dbg_idle_chk_reg *)
4324			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4325			[rule->reg_offset];
4326		cond_regs = &regs[0].cond_reg;
4327		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4328			     [rule->imm_offset];
4329
4330		/* Check if all condition register blocks are out of reset, and
4331		 * find maximal number of entries (all condition registers that
4332		 * are memories must have the same size, which is > 1).
4333		 */
4334		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4335		     reg_id++) {
4336			u32 block_id =
4337				GET_FIELD(cond_regs[reg_id].data,
4338					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4339
4340			if (block_id >= MAX_BLOCK_ID) {
4341				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4342				return 0;
4343			}
4344
4345			check_rule = !dev_data->block_in_reset[block_id];
4346			if (cond_regs[reg_id].num_entries > num_reg_entries)
4347				num_reg_entries = cond_regs[reg_id].num_entries;
4348		}
4349
4350		if (!check_rule && dump)
4351			continue;
4352
4353		if (!dump) {
4354			u32 entry_dump_size =
4355				qed_idle_chk_dump_failure(p_hwfn,
4356							  p_ptt,
4357							  dump_buf + offset,
4358							  false,
4359							  rule->rule_id,
4360							  rule,
4361							  0,
4362							  NULL);
4363
4364			offset += num_reg_entries * entry_dump_size;
4365			(*num_failing_rules) += num_reg_entries;
4366			continue;
4367		}
4368
4369		/* Go over all register entries (number of entries is the same
4370		 * for all condition registers).
4371		 */
4372		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4373			u32 next_reg_offset = 0;
4374
4375			/* Read current entry of all condition registers */
4376			for (reg_id = 0; reg_id < rule->num_cond_regs;
4377			     reg_id++) {
4378				const struct dbg_idle_chk_cond_reg *reg =
4379					&cond_regs[reg_id];
4380				u32 padded_entry_size, addr;
4381				bool wide_bus;
4382
4383				/* Find GRC address (if it's a memory, the
4384				 * address of the specific entry is calculated).
4385				 */
4386				addr = GET_FIELD(reg->data,
4387						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4388				wide_bus =
4389				    GET_FIELD(reg->data,
4390					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4391				if (reg->num_entries > 1 ||
4392				    reg->start_entry > 0) {
4393					padded_entry_size =
4394					   reg->entry_size > 1 ?
4395					   roundup_pow_of_two(reg->entry_size) :
4396					   1;
4397					addr += (reg->start_entry + entry_id) *
4398						padded_entry_size;
4399				}
4400
4401				/* Read registers */
4402				if (next_reg_offset + reg->entry_size >=
4403				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4404					DP_NOTICE(p_hwfn,
4405						  "idle check registers entry is too large\n");
4406					return 0;
4407				}
4408
4409				next_reg_offset +=
4410				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4411							    cond_reg_values +
4412							    next_reg_offset,
4413							    dump, addr,
4414							    reg->entry_size,
4415							    wide_bus,
4416							    SPLIT_TYPE_NONE, 0);
4417			}
4418
4419			/* Call rule condition function.
4420			 * If returns true, it's a failure.
4421			 */
4422			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4423							imm_values)) {
4424				offset += qed_idle_chk_dump_failure(p_hwfn,
4425							p_ptt,
4426							dump_buf + offset,
4427							dump,
4428							rule->rule_id,
4429							rule,
4430							entry_id,
4431							cond_reg_values);
4432				(*num_failing_rules)++;
4433			}
4434		}
4435	}
4436
4437	return offset;
4438}
4439
4440/* Performs Idle Check Dump to the specified buffer.
4441 * Returns the dumped size in dwords.
4442 */
4443static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4444			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4445{
4446	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4447	u32 num_failing_rules = 0;
4448
4449	/* Dump global params */
4450	offset += qed_dump_common_global_params(p_hwfn,
4451						p_ptt,
4452						dump_buf + offset, dump, 1);
4453	offset += qed_dump_str_param(dump_buf + offset,
4454				     dump, "dump-type", "idle-chk");
4455
4456	/* Dump idle check section header with a single parameter */
4457	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4458	num_failing_rules_offset = offset;
4459	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4460
4461	while (input_offset <
4462	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4463		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4464			(const struct dbg_idle_chk_cond_hdr *)
4465			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4466			[input_offset++];
4467		bool eval_mode, mode_match = true;
4468		u32 curr_failing_rules;
4469		u16 modes_buf_offset;
4470
4471		/* Check mode */
4472		eval_mode = GET_FIELD(cond_hdr->mode.data,
4473				      DBG_MODE_HDR_EVAL_MODE) > 0;
4474		if (eval_mode) {
4475			modes_buf_offset =
4476				GET_FIELD(cond_hdr->mode.data,
4477					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4478			mode_match = qed_is_mode_match(p_hwfn,
4479						       &modes_buf_offset);
4480		}
4481
4482		if (mode_match) {
4483			offset +=
4484			    qed_idle_chk_dump_rule_entries(p_hwfn,
4485				p_ptt,
4486				dump_buf + offset,
4487				dump,
4488				(const struct dbg_idle_chk_rule *)
4489				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4490				ptr[input_offset],
4491				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4492				&curr_failing_rules);
4493			num_failing_rules += curr_failing_rules;
4494		}
4495
4496		input_offset += cond_hdr->data_size;
4497	}
4498
4499	/* Overwrite num_rules parameter */
4500	if (dump)
4501		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4502				   dump, "num_rules", num_failing_rules);
4503
4504	/* Dump last section */
4505	offset += qed_dump_last_section(dump_buf, offset, dump);
4506
4507	return offset;
4508}
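/* Resulting dump layout produced above: the common global params, the
 * "idle-chk" dump-type string, an "idle_chk" section whose single "num_rules"
 * parameter is first written as 0 and then patched in place at
 * num_failing_rules_offset once all rules were evaluated, one result header
 * plus condition/info register values per failing rule entry, and finally the
 * last section.
 */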
4509
4510/* Finds the meta data image in NVRAM */
4511static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4512					    struct qed_ptt *p_ptt,
4513					    u32 image_type,
4514					    u32 *nvram_offset_bytes,
4515					    u32 *nvram_size_bytes)
4516{
4517	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4518	struct mcp_file_att file_att;
4519	int nvm_result;
4520
4521	/* Call NVRAM get file command */
4522	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4523					p_ptt,
4524					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4525					image_type,
4526					&ret_mcp_resp,
4527					&ret_mcp_param,
4528					&ret_txn_size, (u32 *)&file_att);
4529
4530	/* Check response */
4531	if (nvm_result ||
4532	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4533		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4534
4535	/* Update return values */
4536	*nvram_offset_bytes = file_att.nvm_start_addr;
4537	*nvram_size_bytes = file_att.len;
4538
4539	DP_VERBOSE(p_hwfn,
4540		   QED_MSG_DEBUG,
4541		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4542		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4543
4544	/* Check alignment */
4545	if (*nvram_size_bytes & 0x3)
4546		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4547
4548	return DBG_STATUS_OK;
4549}
4550
4551/* Reads data from NVRAM */
4552static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4553				      struct qed_ptt *p_ptt,
4554				      u32 nvram_offset_bytes,
4555				      u32 nvram_size_bytes, u32 *ret_buf)
4556{
4557	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4558	s32 bytes_left = nvram_size_bytes;
4559	u32 read_offset = 0;
4560
4561	DP_VERBOSE(p_hwfn,
4562		   QED_MSG_DEBUG,
4563		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4564		   nvram_size_bytes);
4565
4566	do {
4567		bytes_to_copy =
4568		    (bytes_left >
4569		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4570
4571		/* Call NVRAM read command */
4572		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4573				       DRV_MSG_CODE_NVM_READ_NVRAM,
4574				       (nvram_offset_bytes +
4575					read_offset) |
4576				       (bytes_to_copy <<
4577					DRV_MB_PARAM_NVM_LEN_OFFSET),
4578				       &ret_mcp_resp, &ret_mcp_param,
4579				       &ret_read_size,
4580				       (u32 *)((u8 *)ret_buf + read_offset)))
4581			return DBG_STATUS_NVRAM_READ_FAILED;
4582
4583		/* Check response */
4584		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4585			return DBG_STATUS_NVRAM_READ_FAILED;
4586
4587		/* Update read offset */
4588		read_offset += ret_read_size;
4589		bytes_left -= ret_read_size;
4590	} while (bytes_left > 0);
4591
4592	return DBG_STATUS_OK;
4593}
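/* Mailbox parameter packing used above: the NVRAM byte offset and the chunk
 * length share a single dword, with the offset in the low bits and the length
 * shifted by DRV_MB_PARAM_NVM_LEN_OFFSET. A sketch of one iteration
 * (hypothetical local names, real macro/command names):
 *
 *	u32 chunk = bytes_left > MCP_DRV_NVM_BUF_LEN ?
 *		    MCP_DRV_NVM_BUF_LEN : bytes_left;
 *	u32 param = (nvram_offset_bytes + read_offset) |
 *		    (chunk << DRV_MB_PARAM_NVM_LEN_OFFSET);
 *
 *	qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, param,
 *			   &resp, &param_out, &read_size, out_buf);
 */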
4594
4595/* Get info on the MCP Trace data in the scratchpad:
4596 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4597 * - trace_data_size (OUT): trace data size in bytes (without the header)
4598 */
4599static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4600						   struct qed_ptt *p_ptt,
4601						   u32 *trace_data_grc_addr,
4602						   u32 *trace_data_size)
4603{
4604	u32 spad_trace_offsize, signature;
4605
4606	/* Read trace section offsize structure from MCP scratchpad */
4607	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4608
4609	/* Extract trace section address from offsize (in scratchpad) */
4610	*trace_data_grc_addr =
4611		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4612
4613	/* Read signature from MCP trace section */
4614	signature = qed_rd(p_hwfn, p_ptt,
4615			   *trace_data_grc_addr +
4616			   offsetof(struct mcp_trace, signature));
4617
4618	if (signature != MFW_TRACE_SIGNATURE)
4619		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4620
4621	/* Read trace size from MCP trace section */
4622	*trace_data_size = qed_rd(p_hwfn,
4623				  p_ptt,
4624				  *trace_data_grc_addr +
4625				  offsetof(struct mcp_trace, size));
4626
4627	return DBG_STATUS_OK;
4628}
4629
4630/* Reads MCP trace meta data image from NVRAM
4631 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4632 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4633 *			      loaded from file).
4634 * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4635 */
4636static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4637						   struct qed_ptt *p_ptt,
4638						   u32 trace_data_size_bytes,
4639						   u32 *running_bundle_id,
4640						   u32 *trace_meta_offset,
4641						   u32 *trace_meta_size)
4642{
4643	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4644
4645	/* Read MCP trace section offsize structure from MCP scratchpad */
4646	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4647
4648	/* Find running bundle ID */
4649	running_mfw_addr =
4650		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4651		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4652	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4653	if (*running_bundle_id > 1)
4654		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4655
4656	/* Find image in NVRAM */
4657	nvram_image_type =
4658	    (*running_bundle_id ==
4659	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4660	return qed_find_nvram_image(p_hwfn,
4661				    p_ptt,
4662				    nvram_image_type,
4663				    trace_meta_offset, trace_meta_size);
4664}
4665
4666/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4667static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4668					       struct qed_ptt *p_ptt,
4669					       u32 nvram_offset_in_bytes,
4670					       u32 size_in_bytes, u32 *buf)
4671{
4672	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4673	enum dbg_status status;
4674	u32 signature;
4675
4676	/* Read meta data from NVRAM */
4677	status = qed_nvram_read(p_hwfn,
4678				p_ptt,
4679				nvram_offset_in_bytes, size_in_bytes, buf);
4680	if (status != DBG_STATUS_OK)
4681		return status;
4682
4683	/* Extract and check first signature */
4684	signature = qed_read_unaligned_dword(byte_buf);
4685	byte_buf += sizeof(signature);
4686	if (signature != NVM_MAGIC_VALUE)
4687		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4688
4689	/* Extract number of modules */
4690	modules_num = *(byte_buf++);
4691
4692	/* Skip all modules */
4693	for (i = 0; i < modules_num; i++) {
4694		module_len = *(byte_buf++);
4695		byte_buf += module_len;
4696	}
4697
4698	/* Extract and check second signature */
4699	signature = qed_read_unaligned_dword(byte_buf);
4700	byte_buf += sizeof(signature);
4701	if (signature != NVM_MAGIC_VALUE)
4702		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4703
4704	return DBG_STATUS_OK;
4705}
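/* Layout of the meta image as implied by the parse loop above (fields are
 * byte-packed and read unaligned):
 *
 *	u32 signature;			(must equal NVM_MAGIC_VALUE)
 *	u8  modules_num;
 *	modules_num times:
 *		u8 module_len;
 *		u8 module[module_len];	(module name string)
 *	u32 signature;			(must equal NVM_MAGIC_VALUE again)
 *
 * The remainder of the image (used later by the trace parser) is not
 * inspected here; this reader only validates the two signatures.
 */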
4706
4707/* Dump MCP Trace */
4708static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4709					  struct qed_ptt *p_ptt,
4710					  u32 *dump_buf,
4711					  bool dump, u32 *num_dumped_dwords)
4712{
4713	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4714	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4715	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4716	enum dbg_status status;
4717	bool mcp_access;
4718	int halted = 0;
4719
4720	*num_dumped_dwords = 0;
4721
4722	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4723
4724	/* Get trace data info */
4725	status = qed_mcp_trace_get_data_info(p_hwfn,
4726					     p_ptt,
4727					     &trace_data_grc_addr,
4728					     &trace_data_size_bytes);
4729	if (status != DBG_STATUS_OK)
4730		return status;
4731
4732	/* Dump global params */
4733	offset += qed_dump_common_global_params(p_hwfn,
4734						p_ptt,
4735						dump_buf + offset, dump, 1);
4736	offset += qed_dump_str_param(dump_buf + offset,
4737				     dump, "dump-type", "mcp-trace");
4738
4739	/* Halt MCP while reading from scratchpad so the read data will be
4740	 * consistent. If halt fails, MCP trace is taken anyway, with a small
4741	 * risk that it may be corrupt.
4742	 */
4743	if (dump && mcp_access) {
4744		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4745		if (!halted)
4746			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4747	}
4748
4749	/* Find trace data size */
4750	trace_data_size_dwords =
4751	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4752			 BYTES_IN_DWORD);
4753
4754	/* Dump trace data section header and param */
4755	offset += qed_dump_section_hdr(dump_buf + offset,
4756				       dump, "mcp_trace_data", 1);
4757	offset += qed_dump_num_param(dump_buf + offset,
4758				     dump, "size", trace_data_size_dwords);
4759
4760	/* Read trace data from scratchpad into dump buffer */
4761	offset += qed_grc_dump_addr_range(p_hwfn,
4762					  p_ptt,
4763					  dump_buf + offset,
4764					  dump,
4765					  BYTES_TO_DWORDS(trace_data_grc_addr),
4766					  trace_data_size_dwords, false,
4767					  SPLIT_TYPE_NONE, 0);
4768
4769	/* Resume MCP (only if halt succeeded) */
4770	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4771		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4772
4773	/* Dump trace meta section header */
4774	offset += qed_dump_section_hdr(dump_buf + offset,
4775				       dump, "mcp_trace_meta", 1);
4776
4777	/* If MCP Trace meta size parameter was set, use it.
4778	 * Otherwise, read trace meta.
4779	 * trace_meta_size_bytes is dword-aligned.
4780	 */
4781	trace_meta_size_bytes =
4782		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4783	if ((!trace_meta_size_bytes || dump) && mcp_access) {
4784		status = qed_mcp_trace_get_meta_info(p_hwfn,
4785						     p_ptt,
4786						     trace_data_size_bytes,
4787						     &running_bundle_id,
4788						     &trace_meta_offset_bytes,
4789						     &trace_meta_size_bytes);
4790		if (status == DBG_STATUS_OK)
4791			trace_meta_size_dwords =
4792				BYTES_TO_DWORDS(trace_meta_size_bytes);
4793	}
4794
4795	/* Dump trace meta size param */
4796	offset += qed_dump_num_param(dump_buf + offset,
4797				     dump, "size", trace_meta_size_dwords);
4798
4799	/* Read trace meta image into dump buffer */
4800	if (dump && trace_meta_size_dwords)
4801		status = qed_mcp_trace_read_meta(p_hwfn,
4802						 p_ptt,
4803						 trace_meta_offset_bytes,
4804						 trace_meta_size_bytes,
4805						 dump_buf + offset);
4806	if (status == DBG_STATUS_OK)
4807		offset += trace_meta_size_dwords;
4808
4809	/* Dump last section */
4810	offset += qed_dump_last_section(dump_buf, offset, dump);
4811
4812	*num_dumped_dwords = offset;
4813
4814	/* If no mcp access, indicate that the dump doesn't contain the meta
4815	 * data from NVRAM.
4816	 */
4817	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4818}
4819
4820/* Dump GRC FIFO */
4821static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4822					 struct qed_ptt *p_ptt,
4823					 u32 *dump_buf,
4824					 bool dump, u32 *num_dumped_dwords)
4825{
4826	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4827	bool fifo_has_data;
4828
4829	*num_dumped_dwords = 0;
4830
4831	/* Dump global params */
4832	offset += qed_dump_common_global_params(p_hwfn,
4833						p_ptt,
4834						dump_buf + offset, dump, 1);
4835	offset += qed_dump_str_param(dump_buf + offset,
4836				     dump, "dump-type", "reg-fifo");
4837
4838	/* Dump fifo data section header and param. The size param is 0 for
4839	 * now, and is overwritten after reading the FIFO.
4840	 */
4841	offset += qed_dump_section_hdr(dump_buf + offset,
4842				       dump, "reg_fifo_data", 1);
4843	size_param_offset = offset;
4844	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4845
4846	if (!dump) {
4847		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4848		 * test how much data is available, except for reading it.
4849		 */
4850		offset += REG_FIFO_DEPTH_DWORDS;
4851		goto out;
4852	}
4853
4854	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4855			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4856
4857	/* Pull available data from fifo. Use DMAE since this is widebus memory
4858	 * and must be accessed atomically. Stop once dwords_read reaches the
4859	 * buffer size, since more entries could be added to the FIFO while we
4860	 * are emptying it.
4861	 */
4862	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4863	len = REG_FIFO_ELEMENT_DWORDS;
4864	for (dwords_read = 0;
4865	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4866	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4867		offset += qed_grc_dump_addr_range(p_hwfn,
4868						  p_ptt,
4869						  dump_buf + offset,
4870						  true,
4871						  addr,
4872						  len,
4873						  true, SPLIT_TYPE_NONE,
4874						  0);
4875		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4876				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4877	}
4878
4879	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4880			   dwords_read);
4881out:
4882	/* Dump last section */
4883	offset += qed_dump_last_section(dump_buf, offset, dump);
4884
4885	*num_dumped_dwords = offset;
4886
4887	return DBG_STATUS_OK;
4888}
4889
4890/* Dump IGU FIFO */
4891static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4892					 struct qed_ptt *p_ptt,
4893					 u32 *dump_buf,
4894					 bool dump, u32 *num_dumped_dwords)
4895{
4896	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4897	bool fifo_has_data;
4898
4899	*num_dumped_dwords = 0;
4900
4901	/* Dump global params */
4902	offset += qed_dump_common_global_params(p_hwfn,
4903						p_ptt,
4904						dump_buf + offset, dump, 1);
4905	offset += qed_dump_str_param(dump_buf + offset,
4906				     dump, "dump-type", "igu-fifo");
4907
4908	/* Dump fifo data section header and param. The size param is 0 for
4909	 * now, and is overwritten after reading the FIFO.
4910	 */
4911	offset += qed_dump_section_hdr(dump_buf + offset,
4912				       dump, "igu_fifo_data", 1);
4913	size_param_offset = offset;
4914	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4915
4916	if (!dump) {
4917		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4918		 * test how much data is available, except for reading it.
4919		 */
4920		offset += IGU_FIFO_DEPTH_DWORDS;
4921		goto out;
4922	}
4923
4924	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4925			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4926
4927	/* Pull available data from fifo. Use DMAE since this is widebus memory
4928	 * and must be accessed atomically. Stop once dwords_read reaches the
4929	 * buffer size, since more entries could be added to the FIFO while we
4930	 * are emptying it.
4931	 */
4932	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4933	len = IGU_FIFO_ELEMENT_DWORDS;
4934	for (dwords_read = 0;
4935	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4936	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4937		offset += qed_grc_dump_addr_range(p_hwfn,
4938						  p_ptt,
4939						  dump_buf + offset,
4940						  true,
4941						  addr,
4942						  len,
4943						  true, SPLIT_TYPE_NONE,
4944						  0);
4945		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4946				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4947	}
4948
4949	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4950			   dwords_read);
4951out:
4952	/* Dump last section */
4953	offset += qed_dump_last_section(dump_buf, offset, dump);
4954
4955	*num_dumped_dwords = offset;
4956
4957	return DBG_STATUS_OK;
4958}
4959
4960/* Protection Override dump */
4961static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4962						    struct qed_ptt *p_ptt,
4963						    u32 *dump_buf,
4964						    bool dump,
4965						    u32 *num_dumped_dwords)
4966{
4967	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4968
4969	*num_dumped_dwords = 0;
4970
4971	/* Dump global params */
4972	offset += qed_dump_common_global_params(p_hwfn,
4973						p_ptt,
4974						dump_buf + offset, dump, 1);
4975	offset += qed_dump_str_param(dump_buf + offset,
4976				     dump, "dump-type", "protection-override");
4977
4978	/* Dump data section header and param. The size param is 0 for now,
4979	 * and is overwritten after reading the data.
4980	 */
4981	offset += qed_dump_section_hdr(dump_buf + offset,
4982				       dump, "protection_override_data", 1);
4983	size_param_offset = offset;
4984	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4985
4986	if (!dump) {
4987		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4988		goto out;
4989	}
4990
4991	/* Add override window info to buffer */
4992	override_window_dwords =
4993		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4994		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4995	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4996	offset += qed_grc_dump_addr_range(p_hwfn,
4997					  p_ptt,
4998					  dump_buf + offset,
4999					  true,
5000					  addr,
5001					  override_window_dwords,
5002					  true, SPLIT_TYPE_NONE, 0);
5003	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
5004			   override_window_dwords);
5005out:
5006	/* Dump last section */
5007	offset += qed_dump_last_section(dump_buf, offset, dump);
5008
5009	*num_dumped_dwords = offset;
5010
5011	return DBG_STATUS_OK;
5012}
5013
5014/* Performs FW Asserts Dump to the specified buffer.
5015 * Returns the dumped size in dwords.
5016 */
5017static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5018			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5019{
5020	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5021	struct fw_asserts_ram_section *asserts;
5022	char storm_letter_str[2] = "?";
5023	struct fw_info fw_info;
5024	u32 offset = 0;
5025	u8 storm_id;
5026
5027	/* Dump global params */
5028	offset += qed_dump_common_global_params(p_hwfn,
5029						p_ptt,
5030						dump_buf + offset, dump, 1);
5031	offset += qed_dump_str_param(dump_buf + offset,
5032				     dump, "dump-type", "fw-asserts");
5033
5034	/* Find Storm dump size */
5035	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5036		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
5037		struct storm_defs *storm = &s_storm_defs[storm_id];
5038		u32 last_list_idx, addr;
5039
5040		if (dev_data->block_in_reset[storm->block_id])
5041			continue;
5042
5043		/* Read FW info for the current Storm */
5044		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5045
5046		asserts = &fw_info.fw_asserts_section;
5047
5048		/* Dump FW Asserts section header and params */
5049		storm_letter_str[0] = storm->letter;
5050		offset += qed_dump_section_hdr(dump_buf + offset,
5051					       dump, "fw_asserts", 2);
5052		offset += qed_dump_str_param(dump_buf + offset,
5053					     dump, "storm", storm_letter_str);
5054		offset += qed_dump_num_param(dump_buf + offset,
5055					     dump,
5056					     "size",
5057					     asserts->list_element_dword_size);
5058
5059		/* Read and dump FW Asserts data */
5060		if (!dump) {
5061			offset += asserts->list_element_dword_size;
5062			continue;
5063		}
5064
5065		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5066			SEM_FAST_REG_INT_RAM +
5067			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5068		next_list_idx_addr = fw_asserts_section_addr +
5069			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5070		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5071		last_list_idx = (next_list_idx > 0 ?
5072				 next_list_idx :
5073				 asserts->list_num_elements) - 1;
5074		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5075		       asserts->list_dword_offset +
5076		       last_list_idx * asserts->list_element_dword_size;
5077		offset +=
5078		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5079					    dump_buf + offset,
5080					    dump, addr,
5081					    asserts->list_element_dword_size,
5082						  false, SPLIT_TYPE_NONE, 0);
5083	}
5084
5085	/* Dump last section */
5086	offset += qed_dump_last_section(dump_buf, offset, dump);
5087
5088	return offset;
5089}
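/* Index arithmetic used above: list_next_index_dword_offset points at the
 * slot the firmware would fill next, so the most recent assert record sits at
 * next_list_idx - 1, wrapping to list_num_elements - 1 when next_list_idx is
 * 0. For example (illustrative element count), with 32 list elements and
 * next_list_idx == 0 the dump reads element 31.
 */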
5090
5091/***************************** Public Functions *******************************/
5092
5093enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5094{
5095	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5096	u8 buf_id;
5097
5098	/* convert binary data to debug arrays */
5099	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5100		s_dbg_arrays[buf_id].ptr =
5101		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5102		s_dbg_arrays[buf_id].size_in_dwords =
5103		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5104	}
5105
5106	return DBG_STATUS_OK;
5107}
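/* A hedged sketch of the binary layout consumed above: the debug data blob
 * begins with an array of struct bin_buffer_hdr, one entry per buffer type,
 * each holding the byte offset and length of that buffer relative to bin_ptr.
 * Illustrative caller (the source of the blob is hypothetical):
 *
 *	const u8 *dbg_blob = ...;	(e.g. data extracted from the FW file)
 *
 *	qed_dbg_set_bin_ptr(dbg_blob);
 *	(s_dbg_arrays[] now points into dbg_blob and backs the
 *	 qed_dbg_*_get_dump_buf_size() / qed_dbg_*_dump() calls below)
 */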
5108
5109bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5110		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5111{
5112	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5113	u8 storm_id;
5114
5115	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5116		struct storm_defs *storm = &s_storm_defs[storm_id];
5117
5118		/* Skip Storm if it's in reset */
5119		if (dev_data->block_in_reset[storm->block_id])
5120			continue;
5121
5122		/* Read FW info for the current Storm */
5123		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5124
5125		return true;
5126	}
5127
5128	return false;
5129}
5130
5131enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
5132				   struct qed_ptt *p_ptt,
5133				   enum dbg_grc_params grc_param, u32 val)
5134{
5135	enum dbg_status status;
5136	int i;
5137
5138	DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
5139		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5140
5141	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5142	if (status != DBG_STATUS_OK)
5143		return status;
5144
5145	/* Initializes the GRC parameters (if not initialized). Needed in order
5146	 * to set the default parameter values for the first time.
5147	 */
5148	qed_dbg_grc_init_params(p_hwfn);
5149
5150	if (grc_param >= MAX_DBG_GRC_PARAMS)
5151		return DBG_STATUS_INVALID_ARGS;
5152	if (val < s_grc_param_defs[grc_param].min ||
5153	    val > s_grc_param_defs[grc_param].max)
5154		return DBG_STATUS_INVALID_ARGS;
5155
5156	if (s_grc_param_defs[grc_param].is_preset) {
5157		/* Preset param */
5158
5159		/* Disabling a preset is not allowed. Call
5160		 * dbg_grc_set_params_default instead.
5161		 */
5162		if (!val)
5163			return DBG_STATUS_INVALID_ARGS;
5164
5165		/* Update all params with the preset values */
5166		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5167			u32 preset_val;
5168
5169			/* Skip persistent params */
5170			if (s_grc_param_defs[i].is_persistent)
5171				continue;
5172
5173			/* Find preset value */
5174			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5175				preset_val =
5176				    s_grc_param_defs[i].exclude_all_preset_val;
5177			else if (grc_param == DBG_GRC_PARAM_CRASH)
5178				preset_val =
5179				    s_grc_param_defs[i].crash_preset_val;
5180			else
5181				return DBG_STATUS_INVALID_ARGS;
5182
5183			qed_grc_set_param(p_hwfn,
5184					  (enum dbg_grc_params)i, preset_val);
5185		}
5186	} else {
5187		/* Regular param - set its value */
5188		qed_grc_set_param(p_hwfn, grc_param, val);
5189	}
5190
5191	return DBG_STATUS_OK;
5192}
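/* Illustrative use of the knob above (the parameter name is real, the value
 * choice is arbitrary): excluding Big RAM contents from the next GRC dump
 * could look like
 *
 *	qed_dbg_grc_config(p_hwfn, p_ptt, DBG_GRC_PARAM_DUMP_BRB, 0);
 *
 * Preset parameters such as DBG_GRC_PARAM_EXCLUDE_ALL can only be enabled
 * (val != 0); reverting to the defaults is done via
 * qed_dbg_grc_set_params_default().
 */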
5193
5194/* Assign default GRC param values */
5195void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5196{
5197	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5198	u32 i;
5199
5200	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5201		if (!s_grc_param_defs[i].is_persistent)
5202			dev_data->grc.param_val[i] =
5203			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5204}
5205
5206enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5207					      struct qed_ptt *p_ptt,
5208					      u32 *buf_size)
5209{
5210	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5211
5212	*buf_size = 0;
5213
5214	if (status != DBG_STATUS_OK)
5215		return status;
5216
5217	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5218	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5219	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5220	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5221	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5222		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5223
5224	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5225}
5226
5227enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5228				 struct qed_ptt *p_ptt,
5229				 u32 *dump_buf,
5230				 u32 buf_size_in_dwords,
5231				 u32 *num_dumped_dwords)
5232{
5233	u32 needed_buf_size_in_dwords;
5234	enum dbg_status status;
5235
5236	*num_dumped_dwords = 0;
5237
5238	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5239					       p_ptt,
5240					       &needed_buf_size_in_dwords);
5241	if (status != DBG_STATUS_OK)
5242		return status;
5243
5244	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5245		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5246
5247	/* GRC Dump */
5248	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5249
5250	/* Revert GRC params to their default */
5251	qed_dbg_grc_set_params_default(p_hwfn);
5252
5253	return status;
5254}
5255
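/* Illustrative caller flow for the qed_dbg_*_dump() entry points in this file
 * (a sketch, not part of the original sources): query the required size in
 * dwords, allocate a buffer, then perform the dump.
 *
 *	u32 size_dw, dumped_dw;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
 *	    DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size_dw * sizeof(u32));
 *	if (!buf)
 *		return;
 *	qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw);
 *	vfree(buf);
 */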
5256enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5257						   struct qed_ptt *p_ptt,
5258						   u32 *buf_size)
5259{
5260	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5261	struct idle_chk_data *idle_chk;
5262	enum dbg_status status;
5263
5264	idle_chk = &dev_data->idle_chk;
5265	*buf_size = 0;
5266
5267	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5268	if (status != DBG_STATUS_OK)
5269		return status;
5270
5271	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5272	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5273	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5274	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5275		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5276
5277	if (!idle_chk->buf_size_set) {
5278		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5279						       p_ptt, NULL, false);
5280		idle_chk->buf_size_set = true;
5281	}
5282
5283	*buf_size = idle_chk->buf_size;
5284
5285	return DBG_STATUS_OK;
5286}
5287
5288enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5289				      struct qed_ptt *p_ptt,
5290				      u32 *dump_buf,
5291				      u32 buf_size_in_dwords,
5292				      u32 *num_dumped_dwords)
5293{
5294	u32 needed_buf_size_in_dwords;
5295	enum dbg_status status;
5296
5297	*num_dumped_dwords = 0;
5298
5299	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5300						    p_ptt,
5301						    &needed_buf_size_in_dwords);
5302	if (status != DBG_STATUS_OK)
5303		return status;
5304
5305	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5306		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5307
5308	/* Update reset state */
5309	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5310
5311	/* Idle Check Dump */
5312	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5313
5314	/* Revert GRC params to their default */
5315	qed_dbg_grc_set_params_default(p_hwfn);
5316
5317	return DBG_STATUS_OK;
5318}
5319
5320enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5321						    struct qed_ptt *p_ptt,
5322						    u32 *buf_size)
5323{
5324	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5325
5326	*buf_size = 0;
5327
5328	if (status != DBG_STATUS_OK)
5329		return status;
5330
5331	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5332}
5333
5334enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5335				       struct qed_ptt *p_ptt,
5336				       u32 *dump_buf,
5337				       u32 buf_size_in_dwords,
5338				       u32 *num_dumped_dwords)
5339{
5340	u32 needed_buf_size_in_dwords;
5341	enum dbg_status status;
5342
5343	status =
5344		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5345						    p_ptt,
5346						    &needed_buf_size_in_dwords);
5347	if (status != DBG_STATUS_OK && status !=
5348	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5349		return status;
5350
5351	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5352		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5353
5354	/* Update reset state */
5355	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5356
5357	/* Perform dump */
5358	status = qed_mcp_trace_dump(p_hwfn,
5359				    p_ptt, dump_buf, true, num_dumped_dwords);
5360
5361	/* Revert GRC params to their default */
5362	qed_dbg_grc_set_params_default(p_hwfn);
5363
5364	return status;
5365}
5366
5367enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5368						   struct qed_ptt *p_ptt,
5369						   u32 *buf_size)
5370{
5371	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5372
5373	*buf_size = 0;
5374
5375	if (status != DBG_STATUS_OK)
5376		return status;
5377
5378	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5379}
5380
5381enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5382				      struct qed_ptt *p_ptt,
5383				      u32 *dump_buf,
5384				      u32 buf_size_in_dwords,
5385				      u32 *num_dumped_dwords)
5386{
5387	u32 needed_buf_size_in_dwords;
5388	enum dbg_status status;
5389
5390	*num_dumped_dwords = 0;
5391
5392	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5393						    p_ptt,
5394						    &needed_buf_size_in_dwords);
5395	if (status != DBG_STATUS_OK)
5396		return status;
5397
5398	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5399		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5400
5401	/* Update reset state */
5402	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5403
5404	status = qed_reg_fifo_dump(p_hwfn,
5405				   p_ptt, dump_buf, true, num_dumped_dwords);
5406
5407	/* Revert GRC params to their default */
5408	qed_dbg_grc_set_params_default(p_hwfn);
5409
5410	return status;
5411}
5412
5413enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5414						   struct qed_ptt *p_ptt,
5415						   u32 *buf_size)
5416{
5417	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5418
5419	*buf_size = 0;
5420
5421	if (status != DBG_STATUS_OK)
5422		return status;
5423
5424	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5425}
5426
5427enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5428				      struct qed_ptt *p_ptt,
5429				      u32 *dump_buf,
5430				      u32 buf_size_in_dwords,
5431				      u32 *num_dumped_dwords)
5432{
5433	u32 needed_buf_size_in_dwords;
5434	enum dbg_status status;
5435
5436	*num_dumped_dwords = 0;
5437
5438	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5439						    p_ptt,
5440						    &needed_buf_size_in_dwords);
5441	if (status != DBG_STATUS_OK)
5442		return status;
5443
5444	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5445		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5446
5447	/* Update reset state */
5448	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5449
5450	status = qed_igu_fifo_dump(p_hwfn,
5451				   p_ptt, dump_buf, true, num_dumped_dwords);
5452	/* Revert GRC params to their default */
5453	qed_dbg_grc_set_params_default(p_hwfn);
5454
5455	return status;
5456}
5457
5458enum dbg_status
5459qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5460					      struct qed_ptt *p_ptt,
5461					      u32 *buf_size)
5462{
5463	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5464
5465	*buf_size = 0;
5466
5467	if (status != DBG_STATUS_OK)
5468		return status;
5469
5470	return qed_protection_override_dump(p_hwfn,
5471					    p_ptt, NULL, false, buf_size);
5472}
5473
5474enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5475						 struct qed_ptt *p_ptt,
5476						 u32 *dump_buf,
5477						 u32 buf_size_in_dwords,
5478						 u32 *num_dumped_dwords)
5479{
5480	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5481	enum dbg_status status;
5482
5483	*num_dumped_dwords = 0;
5484
5485	status =
5486		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5487							      p_ptt,
5488							      p_size);
5489	if (status != DBG_STATUS_OK)
5490		return status;
5491
5492	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5493		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5494
5495	/* Update reset state */
5496	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5497
5498	status = qed_protection_override_dump(p_hwfn,
5499					      p_ptt,
5500					      dump_buf,
5501					      true, num_dumped_dwords);
5502
5503	/* Revert GRC params to their default */
5504	qed_dbg_grc_set_params_default(p_hwfn);
5505
5506	return status;
5507}
5508
5509enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5510						     struct qed_ptt *p_ptt,
5511						     u32 *buf_size)
5512{
5513	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5514
5515	*buf_size = 0;
5516
5517	if (status != DBG_STATUS_OK)
5518		return status;
5519
5520	/* Update reset state */
5521	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5522
5523	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5524
5525	return DBG_STATUS_OK;
5526}
5527
5528enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5529					struct qed_ptt *p_ptt,
5530					u32 *dump_buf,
5531					u32 buf_size_in_dwords,
5532					u32 *num_dumped_dwords)
5533{
5534	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5535	enum dbg_status status;
5536
5537	*num_dumped_dwords = 0;
5538
5539	status =
5540		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5541						     p_ptt,
5542						     p_size);
5543	if (status != DBG_STATUS_OK)
5544		return status;
5545
5546	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5547		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5548
5549	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5550
5551	/* Revert GRC params to their default */
5552	qed_dbg_grc_set_params_default(p_hwfn);
5553
5554	return DBG_STATUS_OK;
5555}
5556
5557enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5558				  struct qed_ptt *p_ptt,
5559				  enum block_id block_id,
5560				  enum dbg_attn_type attn_type,
5561				  bool clear_status,
5562				  struct dbg_attn_block_result *results)
5563{
5564	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5565	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5566	const struct dbg_attn_reg *attn_reg_arr;
5567
5568	if (status != DBG_STATUS_OK)
5569		return status;
5570
5571	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5572	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5573	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5574		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5575
5576	attn_reg_arr = qed_get_block_attn_regs(block_id,
5577					       attn_type, &num_attn_regs);
5578
5579	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5580		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5581		struct dbg_attn_reg_result *reg_result;
5582		u32 sts_addr, sts_val;
5583		u16 modes_buf_offset;
5584		bool eval_mode;
5585
5586		/* Check mode */
5587		eval_mode = GET_FIELD(reg_data->mode.data,
5588				      DBG_MODE_HDR_EVAL_MODE) > 0;
5589		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5590					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5591		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5592			continue;
5593
5594		/* Mode match - read attention status register */
5595		sts_addr = DWORDS_TO_BYTES(clear_status ?
5596					   reg_data->sts_clr_address :
5597					   GET_FIELD(reg_data->data,
5598						     DBG_ATTN_REG_STS_ADDRESS));
5599		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5600		if (!sts_val)
5601			continue;
5602
5603		/* Non-zero attention status - add to results */
5604		reg_result = &results->reg_results[num_result_regs];
5605		SET_FIELD(reg_result->data,
5606			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5607		SET_FIELD(reg_result->data,
5608			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5609			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5610		reg_result->block_attn_offset = reg_data->block_attn_offset;
5611		reg_result->sts_val = sts_val;
5612		reg_result->mask_val = qed_rd(p_hwfn,
5613					      p_ptt,
5614					      DWORDS_TO_BYTES
5615					      (reg_data->mask_address));
5616		num_result_regs++;
5617	}
5618
5619	results->block_id = (u8)block_id;
5620	results->names_offset =
5621	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5622	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5623	SET_FIELD(results->data,
5624		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5625
5626	return DBG_STATUS_OK;
5627}
5628
5629/******************************* Data Types **********************************/
5630
5631struct block_info {
5632	const char *name;
5633	enum block_id id;
5634};
5635
5636/* REG fifo element */
5637struct reg_fifo_element {
5638	u64 data;
5639#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5640#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5641#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5642#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5643#define REG_FIFO_ELEMENT_PF_SHIFT		24
5644#define REG_FIFO_ELEMENT_PF_MASK		0xf
5645#define REG_FIFO_ELEMENT_VF_SHIFT		28
5646#define REG_FIFO_ELEMENT_VF_MASK		0xff
5647#define REG_FIFO_ELEMENT_PORT_SHIFT		36
5648#define REG_FIFO_ELEMENT_PORT_MASK		0x3
5649#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5650#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5651#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5652#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5653#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5654#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5655#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5656#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5657};
5658
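/* Decoding note (illustrative): fields are extracted from 'data' with
 * GET_FIELD(), e.g. the register address printed by the parser below is
 * reconstructed as
 *
 *	GET_FIELD(elem.data, REG_FIFO_ELEMENT_ADDRESS) *
 *		REG_FIFO_ELEMENT_ADDR_FACTOR
 *
 * which converts the stored value into a byte address.
 */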
5659/* IGU fifo element */
5660struct igu_fifo_element {
5661	u32 dword0;
5662#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5663#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5664#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5665#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5666#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5667#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5668#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5669#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5670#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5671#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5672	u32 dword1;
5673	u32 dword2;
5674#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5675#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5676#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5677#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5678	u32 reserved;
5679};
5680
5681struct igu_fifo_wr_data {
5682	u32 data;
5683#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5684#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5685#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5686#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5687#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5688#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5689#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5690#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5691#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5692#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5693#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5694#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5695};
5696
5697struct igu_fifo_cleanup_wr_data {
5698	u32 data;
5699#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5700#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5701#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5702#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5703#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5704#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5705#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5706#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5707};
5708
5709/* Protection override element */
5710struct protection_override_element {
5711	u64 data;
5712#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5713#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5714#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5715#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5716#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5717#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5718#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5719#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5720#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5721#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5722#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5723#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5724};
5725
5726enum igu_fifo_sources {
5727	IGU_SRC_PXP0,
5728	IGU_SRC_PXP1,
5729	IGU_SRC_PXP2,
5730	IGU_SRC_PXP3,
5731	IGU_SRC_PXP4,
5732	IGU_SRC_PXP5,
5733	IGU_SRC_PXP6,
5734	IGU_SRC_PXP7,
5735	IGU_SRC_CAU,
5736	IGU_SRC_ATTN,
5737	IGU_SRC_GRC
5738};
5739
5740enum igu_fifo_addr_types {
5741	IGU_ADDR_TYPE_MSIX_MEM,
5742	IGU_ADDR_TYPE_WRITE_PBA,
5743	IGU_ADDR_TYPE_WRITE_INT_ACK,
5744	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5745	IGU_ADDR_TYPE_READ_INT,
5746	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5747	IGU_ADDR_TYPE_RESERVED
5748};
5749
5750struct igu_fifo_addr_data {
5751	u16 start_addr;
5752	u16 end_addr;
5753	char *desc;
5754	char *vf_desc;
5755	enum igu_fifo_addr_types type;
5756};
5757
5758struct mcp_trace_meta {
5759	u32 modules_num;
5760	char **modules;
5761	u32 formats_num;
5762	struct mcp_trace_format *formats;
5763	bool is_allocated;
5764};
5765
5766/* Debug Tools user data */
5767struct dbg_tools_user_data {
5768	struct mcp_trace_meta mcp_trace_meta;
5769	const u32 *mcp_trace_user_meta_buf;
5770};
5771
5772/******************************** Constants **********************************/
5773
5774#define MAX_MSG_LEN				1024
5775
5776#define MCP_TRACE_MAX_MODULE_LEN		8
5777#define MCP_TRACE_FORMAT_MAX_PARAMS		3
5778#define MCP_TRACE_FORMAT_PARAM_WIDTH \
5779	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5780
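/* Note (illustrative): MCP_TRACE_FORMAT_PARAM_WIDTH is the distance in bits
 * between consecutive per-parameter size fields in a format descriptor, so
 * qed_parse_mcp_trace_buf() below walks the P1..P3 size fields by shifting
 * the mask/shift pair by this width on each iteration.
 */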
5781#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5782#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5783
5784#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5785
5786/***************************** Constant Arrays *******************************/
5787
5788struct user_dbg_array {
5789	const u32 *ptr;
5790	u32 size_in_dwords;
5791};
5792
5793/* Debug arrays */
5794static struct user_dbg_array
5795s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5796
5797/* Block names array */
5798static struct block_info s_block_info_arr[] = {
5799	{"grc", BLOCK_GRC},
5800	{"miscs", BLOCK_MISCS},
5801	{"misc", BLOCK_MISC},
5802	{"dbu", BLOCK_DBU},
5803	{"pglue_b", BLOCK_PGLUE_B},
5804	{"cnig", BLOCK_CNIG},
5805	{"cpmu", BLOCK_CPMU},
5806	{"ncsi", BLOCK_NCSI},
5807	{"opte", BLOCK_OPTE},
5808	{"bmb", BLOCK_BMB},
5809	{"pcie", BLOCK_PCIE},
5810	{"mcp", BLOCK_MCP},
5811	{"mcp2", BLOCK_MCP2},
5812	{"pswhst", BLOCK_PSWHST},
5813	{"pswhst2", BLOCK_PSWHST2},
5814	{"pswrd", BLOCK_PSWRD},
5815	{"pswrd2", BLOCK_PSWRD2},
5816	{"pswwr", BLOCK_PSWWR},
5817	{"pswwr2", BLOCK_PSWWR2},
5818	{"pswrq", BLOCK_PSWRQ},
5819	{"pswrq2", BLOCK_PSWRQ2},
5820	{"pglcs", BLOCK_PGLCS},
5821	{"ptu", BLOCK_PTU},
5822	{"dmae", BLOCK_DMAE},
5823	{"tcm", BLOCK_TCM},
5824	{"mcm", BLOCK_MCM},
5825	{"ucm", BLOCK_UCM},
5826	{"xcm", BLOCK_XCM},
5827	{"ycm", BLOCK_YCM},
5828	{"pcm", BLOCK_PCM},
5829	{"qm", BLOCK_QM},
5830	{"tm", BLOCK_TM},
5831	{"dorq", BLOCK_DORQ},
5832	{"brb", BLOCK_BRB},
5833	{"src", BLOCK_SRC},
5834	{"prs", BLOCK_PRS},
5835	{"tsdm", BLOCK_TSDM},
5836	{"msdm", BLOCK_MSDM},
5837	{"usdm", BLOCK_USDM},
5838	{"xsdm", BLOCK_XSDM},
5839	{"ysdm", BLOCK_YSDM},
5840	{"psdm", BLOCK_PSDM},
5841	{"tsem", BLOCK_TSEM},
5842	{"msem", BLOCK_MSEM},
5843	{"usem", BLOCK_USEM},
5844	{"xsem", BLOCK_XSEM},
5845	{"ysem", BLOCK_YSEM},
5846	{"psem", BLOCK_PSEM},
5847	{"rss", BLOCK_RSS},
5848	{"tmld", BLOCK_TMLD},
5849	{"muld", BLOCK_MULD},
5850	{"yuld", BLOCK_YULD},
5851	{"xyld", BLOCK_XYLD},
5852	{"ptld", BLOCK_PTLD},
5853	{"ypld", BLOCK_YPLD},
5854	{"prm", BLOCK_PRM},
5855	{"pbf_pb1", BLOCK_PBF_PB1},
5856	{"pbf_pb2", BLOCK_PBF_PB2},
5857	{"rpb", BLOCK_RPB},
5858	{"btb", BLOCK_BTB},
5859	{"pbf", BLOCK_PBF},
5860	{"rdif", BLOCK_RDIF},
5861	{"tdif", BLOCK_TDIF},
5862	{"cdu", BLOCK_CDU},
5863	{"ccfc", BLOCK_CCFC},
5864	{"tcfc", BLOCK_TCFC},
5865	{"igu", BLOCK_IGU},
5866	{"cau", BLOCK_CAU},
5867	{"rgfs", BLOCK_RGFS},
5868	{"rgsrc", BLOCK_RGSRC},
5869	{"tgfs", BLOCK_TGFS},
5870	{"tgsrc", BLOCK_TGSRC},
5871	{"umac", BLOCK_UMAC},
5872	{"xmac", BLOCK_XMAC},
5873	{"dbg", BLOCK_DBG},
5874	{"nig", BLOCK_NIG},
5875	{"wol", BLOCK_WOL},
5876	{"bmbn", BLOCK_BMBN},
5877	{"ipc", BLOCK_IPC},
5878	{"nwm", BLOCK_NWM},
5879	{"nws", BLOCK_NWS},
5880	{"ms", BLOCK_MS},
5881	{"phy_pcie", BLOCK_PHY_PCIE},
5882	{"led", BLOCK_LED},
5883	{"avs_wrap", BLOCK_AVS_WRAP},
5884	{"pxpreqbus", BLOCK_PXPREQBUS},
5885	{"misc_aeu", BLOCK_MISC_AEU},
5886	{"bar0_map", BLOCK_BAR0_MAP}
5887};
5888
5889/* Status string array */
5890static const char * const s_status_str[] = {
5891	/* DBG_STATUS_OK */
5892	"Operation completed successfully",
5893
5894	/* DBG_STATUS_APP_VERSION_NOT_SET */
5895	"Debug application version wasn't set",
5896
5897	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5898	"Unsupported debug application version",
5899
5900	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5901	"The debug block wasn't reset since the last recording",
5902
5903	/* DBG_STATUS_INVALID_ARGS */
5904	"Invalid arguments",
5905
5906	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5907	"The debug output was already set",
5908
5909	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5910	"Invalid PCI buffer size",
5911
5912	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5913	"PCI buffer allocation failed",
5914
5915	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5916	"A PCI buffer wasn't allocated",
5917
5918	/* DBG_STATUS_TOO_MANY_INPUTS */
5919	"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
5920
5921	/* DBG_STATUS_INPUT_OVERLAP */
5922	"Overlapping debug bus inputs",
5923
5924	/* DBG_STATUS_HW_ONLY_RECORDING */
5925	"Cannot record Storm data since the entire recording cycle is used by HW",
5926
5927	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5928	"The Storm was already enabled",
5929
5930	/* DBG_STATUS_STORM_NOT_ENABLED */
5931	"The specified Storm wasn't enabled",
5932
5933	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5934	"The block was already enabled",
5935
5936	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5937	"The specified block wasn't enabled",
5938
5939	/* DBG_STATUS_NO_INPUT_ENABLED */
5940	"No input was enabled for recording",
5941
5942	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5943	"Filters and triggers are not allowed when recording in 64b units",
5944
5945	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5946	"The filter was already enabled",
5947
5948	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5949	"The trigger was already enabled",
5950
5951	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5952	"The trigger wasn't enabled",
5953
5954	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5955	"A constraint can be added only after a filter was enabled or a trigger state was added",
5956
5957	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5958	"Cannot add more than 3 trigger states",
5959
5960	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5961	"Cannot add more than 4 constraints per filter or trigger state",
5962
5963	/* DBG_STATUS_RECORDING_NOT_STARTED */
5964	"The recording wasn't started",
5965
5966	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5967	"A trigger was configured, but it didn't trigger",
5968
5969	/* DBG_STATUS_NO_DATA_RECORDED */
5970	"No data was recorded",
5971
5972	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5973	"Dump buffer is too small",
5974
5975	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5976	"Dumped data is not aligned to chunks",
5977
5978	/* DBG_STATUS_UNKNOWN_CHIP */
5979	"Unknown chip",
5980
5981	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5982	"Failed allocating virtual memory",
5983
5984	/* DBG_STATUS_BLOCK_IN_RESET */
5985	"The input block is in reset",
5986
5987	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5988	"Invalid MCP trace signature found in NVRAM",
5989
5990	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5991	"Invalid bundle ID found in NVRAM",
5992
5993	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5994	"Failed getting NVRAM image",
5995
5996	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5997	"NVRAM image is not dword-aligned",
5998
5999	/* DBG_STATUS_NVRAM_READ_FAILED */
6000	"Failed reading from NVRAM",
6001
6002	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
6003	"Idle check parsing failed",
6004
6005	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
6006	"MCP Trace data is corrupt",
6007
6008	/* DBG_STATUS_MCP_TRACE_NO_META */
6009	"Dump doesn't contain meta data - it must be provided in image file",
6010
6011	/* DBG_STATUS_MCP_COULD_NOT_HALT */
6012	"Failed to halt MCP",
6013
6014	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
6015	"Failed to resume MCP after halt",
6016
6017	/* DBG_STATUS_RESERVED2 */
6018	"Reserved debug status - shouldn't be returned",
6019
6020	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
6021	"Failed to empty SEMI sync FIFO",
6022
6023	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
6024	"IGU FIFO data is corrupt",
6025
6026	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
6027	"MCP failed to mask parities",
6028
6029	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
6030	"FW Asserts parsing failed",
6031
6032	/* DBG_STATUS_REG_FIFO_BAD_DATA */
6033	"GRC FIFO data is corrupt",
6034
6035	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
6036	"Protection Override data is corrupt",
6037
6038	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
6039	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
6040
6041	/* DBG_STATUS_FILTER_BUG */
6042	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
6043
6044	/* DBG_STATUS_NON_MATCHING_LINES */
6045	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
6046
6047	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
6048	"The selected trigger dword offset wasn't enabled in the recorded HW block",
6049
6050	/* DBG_STATUS_DBG_BUS_IN_USE */
6051	"The debug bus is in use"
6052};
6053
6054/* Idle check severity names array */
6055static const char * const s_idle_chk_severity_str[] = {
6056	"Error",
6057	"Error if no traffic",
6058	"Warning"
6059};
6060
6061/* MCP Trace level names array */
6062static const char * const s_mcp_trace_level_str[] = {
6063	"ERROR",
6064	"TRACE",
6065	"DEBUG"
6066};
6067
6068/* Access type names array */
6069static const char * const s_access_strs[] = {
6070	"read",
6071	"write"
6072};
6073
6074/* Privilege type names array */
6075static const char * const s_privilege_strs[] = {
6076	"VF",
6077	"PDA",
6078	"HV",
6079	"UA"
6080};
6081
6082/* Protection type names array */
6083static const char * const s_protection_strs[] = {
6084	"(default)",
6085	"(default)",
6086	"(default)",
6087	"(default)",
6088	"override VF",
6089	"override PDA",
6090	"override HV",
6091	"override UA"
6092};
6093
6094/* Master type names array */
6095static const char * const s_master_strs[] = {
6096	"???",
6097	"pxp",
6098	"mcp",
6099	"msdm",
6100	"psdm",
6101	"ysdm",
6102	"usdm",
6103	"tsdm",
6104	"xsdm",
6105	"dbu",
6106	"dmae",
6107	"???",
6108	"???",
6109	"???",
6110	"???",
6111	"???"
6112};
6113
6114/* REG FIFO error messages array */
6115static const char * const s_reg_fifo_error_strs[] = {
6116	"grc timeout",
6117	"address doesn't belong to any block",
6118	"reserved address in block or write to read-only address",
6119	"privilege/protection mismatch",
6120	"path isolation error"
6121};
6122
6123/* IGU FIFO sources array */
6124static const char * const s_igu_fifo_source_strs[] = {
6125	"TSTORM",
6126	"MSTORM",
6127	"USTORM",
6128	"XSTORM",
6129	"YSTORM",
6130	"PSTORM",
6131	"PCIE",
6132	"NIG_QM_PBF",
6133	"CAU",
6134	"ATTN",
6135	"GRC",
6136};
6137
6138/* IGU FIFO error messages */
6139static const char * const s_igu_fifo_error_strs[] = {
6140	"no error",
6141	"length error",
6142	"function disabled",
6143	"VF sent command to attention address",
6144	"host sent prod update command",
6145	"read of during interrupt register while in MIMD mode",
6146	"access to PXP BAR reserved address",
6147	"producer update command to attention index",
6148	"unknown error",
6149	"SB index not valid",
6150	"SB relative index and FID not found",
6151	"FID not match",
6152	"command with error flag asserted (PCI error or CAU discard)",
6153	"VF sent cleanup and RF cleanup is disabled",
6154	"cleanup command on type bigger than 4"
6155};
6156
6157/* IGU FIFO address data */
6158static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6159	{0x0, 0x101, "MSI-X Memory", NULL,
6160	 IGU_ADDR_TYPE_MSIX_MEM},
6161	{0x102, 0x1ff, "reserved", NULL,
6162	 IGU_ADDR_TYPE_RESERVED},
6163	{0x200, 0x200, "Write PBA[0:63]", NULL,
6164	 IGU_ADDR_TYPE_WRITE_PBA},
6165	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6166	 IGU_ADDR_TYPE_WRITE_PBA},
6167	{0x202, 0x202, "Write PBA[128]", "reserved",
6168	 IGU_ADDR_TYPE_WRITE_PBA},
6169	{0x203, 0x3ff, "reserved", NULL,
6170	 IGU_ADDR_TYPE_RESERVED},
6171	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6172	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6173	{0x5f0, 0x5f0, "Attention bits update", NULL,
6174	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6175	{0x5f1, 0x5f1, "Attention bits set", NULL,
6176	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6177	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6178	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6179	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6180	 IGU_ADDR_TYPE_READ_INT},
6181	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6182	 IGU_ADDR_TYPE_READ_INT},
6183	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6184	 IGU_ADDR_TYPE_READ_INT},
6185	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6186	 IGU_ADDR_TYPE_READ_INT},
6187	{0x5f7, 0x5ff, "reserved", NULL,
6188	 IGU_ADDR_TYPE_RESERVED},
6189	{0x600, 0x7ff, "Producer update", NULL,
6190	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6191};
6192
6193/******************************** Variables **********************************/
6194
6195/* Temporary buffer, used for print size calculations */
6196static char s_temp_buf[MAX_MSG_LEN];
6197
6198/**************************** Private Functions ******************************/
6199
6200static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6201{
6202	return (a + b) % size;
6203}
6204
6205static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6206{
6207	return (size + a - b) % size;
6208}
6209
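/* Worked example (illustrative): with a 16-byte cyclic buffer,
 * qed_cyclic_add(14, 3, 16) == 1 and qed_cyclic_sub(1, 3, 16) == 14,
 * i.e. offsets wrap around the end of the buffer in both directions.
 */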
6210/* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6211  * bytes) and returns them as a dword value. The specified buffer offset is
6212 * updated.
6213 */
6214static u32 qed_read_from_cyclic_buf(void *buf,
6215				    u32 *offset,
6216				    u32 buf_size, u8 num_bytes_to_read)
6217{
6218	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6219	u32 val = 0;
6220
6221	val_ptr = (u8 *)&val;
6222
6223	/* Assume running on a LITTLE ENDIAN and the buffer is network order
6224	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
6225	 */
6226	for (i = 0; i < num_bytes_to_read; i++) {
6227		val_ptr[i] = bytes_buf[*offset];
6228		*offset = qed_cyclic_add(*offset, 1, buf_size);
6229	}
6230
6231	return val;
6232}
6233
6234/* Reads and returns the next byte from the specified buffer.
6235 * The specified buffer offset is updated.
6236 */
6237static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6238{
6239	return ((u8 *)buf)[(*offset)++];
6240}
6241
6242/* Reads and returns the next dword from the specified buffer.
6243 * The specified buffer offset is updated.
6244 */
6245static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6246{
6247	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6248
6249	*offset += 4;
6250
6251	return dword_val;
6252}
6253
6254/* Reads the next string from the specified buffer, and copies it to the
6255 * specified pointer. The specified buffer offset is updated.
6256 */
6257static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6258{
6259	const char *source_str = &((const char *)buf)[*offset];
6260
6261	strncpy(dest, source_str, size);
6262	dest[size - 1] = '\0';
6263	*offset += size;
6264}
6265
6266/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6267 * If the specified buffer in NULL, a temporary buffer pointer is returned.
6268 */
6269static char *qed_get_buf_ptr(void *buf, u32 offset)
6270{
6271	return buf ? (char *)buf + offset : s_temp_buf;
6272}
6273
6274/* Reads a param from the specified buffer. Returns the number of dwords read.
6275  * If the returned param_str_val is NULL, the param is numeric and its value
6276  * is returned in param_num_val.
6277  * Otherwise, the param is a string and its pointer is returned in param_str_val.
6278 */
6279static u32 qed_read_param(u32 *dump_buf,
6280			  const char **param_name,
6281			  const char **param_str_val, u32 *param_num_val)
6282{
6283	char *char_buf = (char *)dump_buf;
6284	size_t offset = 0;
6285
6286	/* Extract param name */
6287	*param_name = char_buf;
6288	offset += strlen(*param_name) + 1;
6289
6290	/* Check param type */
6291	if (*(char_buf + offset++)) {
6292		/* String param */
6293		*param_str_val = char_buf + offset;
6294		*param_num_val = 0;
6295		offset += strlen(*param_str_val) + 1;
6296		if (offset & 0x3)
6297			offset += (4 - (offset & 0x3));
6298	} else {
6299		/* Numeric param */
6300		*param_str_val = NULL;
6301		if (offset & 0x3)
6302			offset += (4 - (offset & 0x3));
6303		*param_num_val = *(u32 *)(char_buf + offset);
6304		offset += 4;
6305	}
6306
6307	return (u32)offset / 4;
6308}
6309
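/* Buffer layout consumed by qed_read_param() (sketch):
 *
 *	"name\0" <type byte != 0> "string value\0" <pad to dword boundary>
 *	"name\0" <type byte == 0> <pad to dword boundary> <u32 value>
 *
 * The return value is the total number of dwords consumed, so callers can
 * simply advance dump_buf by it.
 */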
6310/* Reads a section header from the specified buffer.
6311 * Returns the number of dwords read.
6312 */
6313static u32 qed_read_section_hdr(u32 *dump_buf,
6314				const char **section_name,
6315				u32 *num_section_params)
6316{
6317	const char *param_str_val;
6318
6319	return qed_read_param(dump_buf,
6320			      section_name, &param_str_val, num_section_params);
6321}
6322
6323/* Reads section params from the specified buffer and prints them to the results
6324 * buffer. Returns the number of dwords read.
6325 */
6326static u32 qed_print_section_params(u32 *dump_buf,
6327				    u32 num_section_params,
6328				    char *results_buf, u32 *num_chars_printed)
6329{
6330	u32 i, dump_offset = 0, results_offset = 0;
6331
6332	for (i = 0; i < num_section_params; i++) {
6333		const char *param_name, *param_str_val;
6334		u32 param_num_val = 0;
6335
6336		dump_offset += qed_read_param(dump_buf + dump_offset,
6337					      &param_name,
6338					      &param_str_val, &param_num_val);
6339
6340		if (param_str_val)
6341			results_offset +=
6342				sprintf(qed_get_buf_ptr(results_buf,
6343							results_offset),
6344					"%s: %s\n", param_name, param_str_val);
6345		else if (strcmp(param_name, "fw-timestamp"))
6346			results_offset +=
6347				sprintf(qed_get_buf_ptr(results_buf,
6348							results_offset),
6349					"%s: %d\n", param_name, param_num_val);
6350	}
6351
6352	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6353				  "\n");
6354
6355	*num_chars_printed = results_offset;
6356
6357	return dump_offset;
6358}
6359
6360static struct dbg_tools_user_data *
6361qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
6362{
6363	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6364}
6365
6366/* Parses the idle check rules and returns the number of characters printed.
6367 * In case of parsing error, returns 0.
6368 */
6369static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6370					 u32 *dump_buf_end,
6371					 u32 num_rules,
6372					 bool print_fw_idle_chk,
6373					 char *results_buf,
6374					 u32 *num_errors, u32 *num_warnings)
6375{
6376	/* Offset in results_buf in bytes */
6377	u32 results_offset = 0;
6378
6379	u32 rule_idx;
6380	u16 i, j;
6381
6382	*num_errors = 0;
6383	*num_warnings = 0;
6384
6385	/* Go over dumped results */
6386	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6387	     rule_idx++) {
6388		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6389		struct dbg_idle_chk_result_hdr *hdr;
6390		const char *parsing_str, *lsi_msg;
6391		u32 parsing_str_offset;
6392		bool has_fw_msg;
6393		u8 curr_reg_id;
6394
6395		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6396		rule_parsing_data =
6397			(const struct dbg_idle_chk_rule_parsing_data *)
6398			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6399			ptr[hdr->rule_id];
6400		parsing_str_offset =
6401			GET_FIELD(rule_parsing_data->data,
6402				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6403		has_fw_msg =
6404			GET_FIELD(rule_parsing_data->data,
6405				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6406		parsing_str =
6407			&((const char *)
6408			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6409			[parsing_str_offset];
6410		lsi_msg = parsing_str;
6411		curr_reg_id = 0;
6412
6413		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6414			return 0;
6415
6416		/* Skip rule header */
6417		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6418
6419		/* Update errors/warnings count */
6420		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6421		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6422			(*num_errors)++;
6423		else
6424			(*num_warnings)++;
6425
6426		/* Print rule severity */
6427		results_offset +=
6428		    sprintf(qed_get_buf_ptr(results_buf,
6429					    results_offset), "%s: ",
6430			    s_idle_chk_severity_str[hdr->severity]);
6431
6432		/* Print rule message */
6433		if (has_fw_msg)
6434			parsing_str += strlen(parsing_str) + 1;
6435		results_offset +=
6436		    sprintf(qed_get_buf_ptr(results_buf,
6437					    results_offset), "%s.",
6438			    has_fw_msg &&
6439			    print_fw_idle_chk ? parsing_str : lsi_msg);
6440		parsing_str += strlen(parsing_str) + 1;
6441
6442		/* Print register values */
6443		results_offset +=
6444		    sprintf(qed_get_buf_ptr(results_buf,
6445					    results_offset), " Registers:");
6446		for (i = 0;
6447		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6448		     i++) {
6449			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6450			bool is_mem;
6451			u8 reg_id;
6452
6453			reg_hdr =
6454				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6455			is_mem = GET_FIELD(reg_hdr->data,
6456					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6457			reg_id = GET_FIELD(reg_hdr->data,
6458					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6459
6460			/* Skip reg header */
6461			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6462
6463			/* Skip register names until the required reg_id is
6464			 * reached.
6465			 */
6466			for (; reg_id > curr_reg_id;
6467			     curr_reg_id++,
6468			     parsing_str += strlen(parsing_str) + 1);
6469
6470			results_offset +=
6471			    sprintf(qed_get_buf_ptr(results_buf,
6472						    results_offset), " %s",
6473				    parsing_str);
6474			if (i < hdr->num_dumped_cond_regs && is_mem)
6475				results_offset +=
6476				    sprintf(qed_get_buf_ptr(results_buf,
6477							    results_offset),
6478					    "[%d]", hdr->mem_entry_id +
6479					    reg_hdr->start_entry);
6480			results_offset +=
6481			    sprintf(qed_get_buf_ptr(results_buf,
6482						    results_offset), "=");
6483			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6484				results_offset +=
6485				    sprintf(qed_get_buf_ptr(results_buf,
6486							    results_offset),
6487					    "0x%x", *dump_buf);
6488				if (j < reg_hdr->size - 1)
6489					results_offset +=
6490					    sprintf(qed_get_buf_ptr
6491						    (results_buf,
6492						     results_offset), ",");
6493			}
6494		}
6495
6496		results_offset +=
6497		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6498	}
6499
6500	/* Check if end of dump buffer was exceeded */
6501	if (dump_buf > dump_buf_end)
6502		return 0;
6503
6504	return results_offset;
6505}
6506
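/* Layout of each dumped rule parsed above (sketch): a dbg_idle_chk_result_hdr,
 * followed, for every dumped condition/info register, by a
 * dbg_idle_chk_result_reg_hdr and reg_hdr->size dwords of register or memory
 * values. The severity, rule message and register names are resolved through
 * the parsing-strings debug array using the offsets in the rule parsing data.
 */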
6507/* Parses an idle check dump buffer.
6508 * If result_buf is not NULL, the idle check results are printed to it.
6509 * In any case, the required results buffer size is assigned to
6510 * parsed_results_bytes.
6511 * The parsing status is returned.
6512 */
6513static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6514					       u32 num_dumped_dwords,
6515					       char *results_buf,
6516					       u32 *parsed_results_bytes,
6517					       u32 *num_errors,
6518					       u32 *num_warnings)
6519{
6520	const char *section_name, *param_name, *param_str_val;
6521	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6522	u32 num_section_params = 0, num_rules;
6523
6524	/* Offset in results_buf in bytes */
6525	u32 results_offset = 0;
6526
6527	*parsed_results_bytes = 0;
6528	*num_errors = 0;
6529	*num_warnings = 0;
6530
6531	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6532	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6533		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6534
6535	/* Read global_params section */
6536	dump_buf += qed_read_section_hdr(dump_buf,
6537					 &section_name, &num_section_params);
6538	if (strcmp(section_name, "global_params"))
6539		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6540
6541	/* Print global params */
6542	dump_buf += qed_print_section_params(dump_buf,
6543					     num_section_params,
6544					     results_buf, &results_offset);
6545
6546	/* Read idle_chk section */
6547	dump_buf += qed_read_section_hdr(dump_buf,
6548					 &section_name, &num_section_params);
6549	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6550		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6551	dump_buf += qed_read_param(dump_buf,
6552				   &param_name, &param_str_val, &num_rules);
6553	if (strcmp(param_name, "num_rules"))
6554		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6555
6556	if (num_rules) {
6557		u32 rules_print_size;
6558
6559		/* Print FW output */
6560		results_offset +=
6561		    sprintf(qed_get_buf_ptr(results_buf,
6562					    results_offset),
6563			    "FW_IDLE_CHECK:\n");
6564		rules_print_size =
6565			qed_parse_idle_chk_dump_rules(dump_buf,
6566						      dump_buf_end,
6567						      num_rules,
6568						      true,
6569						      results_buf ?
6570						      results_buf +
6571						      results_offset :
6572						      NULL,
6573						      num_errors,
6574						      num_warnings);
6575		results_offset += rules_print_size;
6576		if (!rules_print_size)
6577			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6578
6579		/* Print LSI output */
6580		results_offset +=
6581		    sprintf(qed_get_buf_ptr(results_buf,
6582					    results_offset),
6583			    "\nLSI_IDLE_CHECK:\n");
6584		rules_print_size =
6585			qed_parse_idle_chk_dump_rules(dump_buf,
6586						      dump_buf_end,
6587						      num_rules,
6588						      false,
6589						      results_buf ?
6590						      results_buf +
6591						      results_offset :
6592						      NULL,
6593						      num_errors,
6594						      num_warnings);
6595		results_offset += rules_print_size;
6596		if (!rules_print_size)
6597			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6598	}
6599
6600	/* Print errors/warnings count */
6601	if (*num_errors)
6602		results_offset +=
6603		    sprintf(qed_get_buf_ptr(results_buf,
6604					    results_offset),
6605			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6606			    *num_errors, *num_warnings);
6607	else if (*num_warnings)
6608		results_offset +=
6609		    sprintf(qed_get_buf_ptr(results_buf,
6610					    results_offset),
6611			    "\nIdle Check completed successfully (with %d warnings)\n",
6612			    *num_warnings);
6613	else
6614		results_offset +=
6615		    sprintf(qed_get_buf_ptr(results_buf,
6616					    results_offset),
6617			    "\nIdle Check completed successfully\n");
6618
6619	/* Add 1 for string NULL termination */
6620	*parsed_results_bytes = results_offset + 1;
6621
6622	return DBG_STATUS_OK;
6623}
6624
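/* Note (illustrative): the idle check dump starts with a "global_params"
 * section, followed by an "idle_chk" section whose single "num_rules" param
 * gives the rule count. The same rule data is then parsed twice above, once
 * with FW messages (print_fw_idle_chk = true) and once with LSI messages, to
 * produce the FW_IDLE_CHECK and LSI_IDLE_CHECK outputs.
 */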
6625/* Allocates and fills MCP Trace meta data based on the specified meta data
6626 * dump buffer.
6627 * Returns debug status code.
6628 */
6629static enum dbg_status
6630qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6631			      const u32 *meta_buf)
6632{
6633	struct dbg_tools_user_data *dev_user_data;
6634	u32 offset = 0, signature, i;
6635	struct mcp_trace_meta *meta;
6636	u8 *meta_buf_bytes;
6637
6638	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6639	meta = &dev_user_data->mcp_trace_meta;
6640	meta_buf_bytes = (u8 *)meta_buf;
6641
6642	/* Free the previous meta before loading a new one. */
6643	if (meta->is_allocated)
6644		qed_mcp_trace_free_meta_data(p_hwfn);
6645
6646	memset(meta, 0, sizeof(*meta));
6647
6648	/* Read first signature */
6649	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6650	if (signature != NVM_MAGIC_VALUE)
6651		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6652
6653	/* Read no. of modules and allocate memory for their pointers */
6654	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6655	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6656				GFP_KERNEL);
6657	if (!meta->modules)
6658		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6659
6660	/* Allocate and read all module strings */
6661	for (i = 0; i < meta->modules_num; i++) {
6662		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6663
6664		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6665		if (!(*(meta->modules + i))) {
6666			/* Update number of modules to be released */
6667			meta->modules_num = i ? i - 1 : 0;
6668			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6669		}
6670
6671		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6672				      *(meta->modules + i));
6673		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6674			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6675	}
6676
6677	/* Read second signature */
6678	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6679	if (signature != NVM_MAGIC_VALUE)
6680		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6681
6682	/* Read number of formats and allocate memory for all formats */
6683	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6684	meta->formats = kcalloc(meta->formats_num,
6685				sizeof(struct mcp_trace_format),
6686				GFP_KERNEL);
6687	if (!meta->formats)
6688		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6689
6690	/* Allocate and read all strings */
6691	for (i = 0; i < meta->formats_num; i++) {
6692		struct mcp_trace_format *format_ptr = &meta->formats[i];
6693		u8 format_len;
6694
6695		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6696							   &offset);
6697		format_len =
6698		    (format_ptr->data &
6699		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6700		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6701		if (!format_ptr->format_str) {
6702			/* Update number of modules to be released */
6703			meta->formats_num = i ? i - 1 : 0;
6704			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6705		}
6706
6707		qed_read_str_from_buf(meta_buf_bytes,
6708				      &offset,
6709				      format_len, format_ptr->format_str);
6710	}
6711
6712	meta->is_allocated = true;
6713	return DBG_STATUS_OK;
6714}
6715
6716/* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6717 * are printed to it. The parsing status is returned.
6718 * Arguments:
6719 * trace_buf - MCP trace cyclic buffer
6720 * trace_buf_size - MCP trace cyclic buffer size in bytes
6721 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6722 *               buffer.
6723 * data_size - size in bytes of data to parse.
6724 * parsed_buf - destination buffer for parsed data.
6725 * parsed_results_bytes - size of parsed data in bytes.
6726 */
6727static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6728					       u8 *trace_buf,
6729					       u32 trace_buf_size,
6730					       u32 data_offset,
6731					       u32 data_size,
6732					       char *parsed_buf,
6733					       u32 *parsed_results_bytes)
6734{
6735	struct dbg_tools_user_data *dev_user_data;
6736	struct mcp_trace_meta *meta;
6737	u32 param_mask, param_shift;
6738	enum dbg_status status;
6739
6740	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6741	meta = &dev_user_data->mcp_trace_meta;
6742	*parsed_results_bytes = 0;
6743
6744	if (!meta->is_allocated)
6745		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6746
6747	status = DBG_STATUS_OK;
6748
6749	while (data_size) {
6750		struct mcp_trace_format *format_ptr;
6751		u8 format_level, format_module;
6752		u32 params[3] = { 0, 0, 0 };
6753		u32 header, format_idx, i;
6754
6755		if (data_size < MFW_TRACE_ENTRY_SIZE)
6756			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6757
6758		header = qed_read_from_cyclic_buf(trace_buf,
6759						  &data_offset,
6760						  trace_buf_size,
6761						  MFW_TRACE_ENTRY_SIZE);
6762		data_size -= MFW_TRACE_ENTRY_SIZE;
6763		format_idx = header & MFW_TRACE_EVENTID_MASK;
6764
6765		/* Skip message if its index doesn't exist in the meta data */
6766		if (format_idx >= meta->formats_num) {
6767			u8 format_size =
6768				(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6769				     MFW_TRACE_PRM_SIZE_SHIFT);
6770
6771			if (data_size < format_size)
6772				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6773
6774			data_offset = qed_cyclic_add(data_offset,
6775						     format_size,
6776						     trace_buf_size);
6777			data_size -= format_size;
6778			continue;
6779		}
6780
6781		format_ptr = &meta->formats[format_idx];
6782
6783		for (i = 0,
6784		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6785		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6786		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6787		     i++,
6788		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6789		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6790			/* Extract param size (0..3) */
6791			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6792					     param_shift);
6793
6794			/* If the param size is zero, there are no other
6795			 * parameters.
6796			 */
6797			if (!param_size)
6798				break;
6799
6800			/* Size is encoded using 2 bits, where 3 is used to
6801			 * encode 4.
6802			 */
6803			if (param_size == 3)
6804				param_size = 4;
6805
6806			if (data_size < param_size)
6807				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6808
6809			params[i] = qed_read_from_cyclic_buf(trace_buf,
6810							     &data_offset,
6811							     trace_buf_size,
6812							     param_size);
6813			data_size -= param_size;
6814		}
6815
6816		format_level = (u8)((format_ptr->data &
6817				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
6818				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
6819		format_module = (u8)((format_ptr->data &
6820				      MCP_TRACE_FORMAT_MODULE_MASK) >>
6821				     MCP_TRACE_FORMAT_MODULE_SHIFT);
6822		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6823			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6824
6825		/* Print current message to results buffer */
6826		*parsed_results_bytes +=
6827			sprintf(qed_get_buf_ptr(parsed_buf,
6828						*parsed_results_bytes),
6829				"%s %-8s: ",
6830				s_mcp_trace_level_str[format_level],
6831				meta->modules[format_module]);
6832		*parsed_results_bytes +=
6833		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6834			    format_ptr->format_str,
6835			    params[0], params[1], params[2]);
6836	}
6837
6838	/* Add string NULL terminator */
6839	(*parsed_results_bytes)++;
6840
6841	return status;
6842}
6843
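/* Entry format handled above (sketch): each trace entry starts with an
 * MFW_TRACE_ENTRY_SIZE-byte header whose low bits select a format descriptor
 * from the meta data; the descriptor then gives the module, level, printf
 * format string and the size (1, 2 or 4 bytes, code 3 meaning 4) of each of
 * up to MCP_TRACE_FORMAT_MAX_PARAMS parameters that follow the header.
 */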
6844/* Parses an MCP Trace dump buffer.
6845 * If result_buf is not NULL, the MCP Trace results are printed to it.
6846 * In any case, the required results buffer size is assigned to
6847 * parsed_results_bytes.
6848 * The parsing status is returned.
6849 */
6850static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6851						u32 *dump_buf,
6852						char *results_buf,
6853						u32 *parsed_results_bytes,
6854						bool free_meta_data)
6855{
6856	const char *section_name, *param_name, *param_str_val;
6857	u32 data_size, trace_data_dwords, trace_meta_dwords;
6858	u32 offset, results_offset, results_buf_bytes;
6859	u32 param_num_val, num_section_params;
6860	struct mcp_trace *trace;
6861	enum dbg_status status;
6862	const u32 *meta_buf;
6863	u8 *trace_buf;
6864
6865	*parsed_results_bytes = 0;
6866
6867	/* Read global_params section */
6868	dump_buf += qed_read_section_hdr(dump_buf,
6869					 &section_name, &num_section_params);
6870	if (strcmp(section_name, "global_params"))
6871		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6872
6873	/* Print global params */
6874	dump_buf += qed_print_section_params(dump_buf,
6875					     num_section_params,
6876					     results_buf, &results_offset);
6877
6878	/* Read trace_data section */
6879	dump_buf += qed_read_section_hdr(dump_buf,
6880					 &section_name, &num_section_params);
6881	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6882		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6883	dump_buf += qed_read_param(dump_buf,
6884				   &param_name, &param_str_val, &param_num_val);
6885	if (strcmp(param_name, "size"))
6886		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6887	trace_data_dwords = param_num_val;
6888
6889	/* Prepare trace info */
6890	trace = (struct mcp_trace *)dump_buf;
6891	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6892		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6893
6894	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6895	offset = trace->trace_oldest;
6896	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6897	dump_buf += trace_data_dwords;
6898
6899	/* Read meta_data section */
6900	dump_buf += qed_read_section_hdr(dump_buf,
6901					 &section_name, &num_section_params);
6902	if (strcmp(section_name, "mcp_trace_meta"))
6903		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6904	dump_buf += qed_read_param(dump_buf,
6905				   &param_name, &param_str_val, &param_num_val);
6906	if (strcmp(param_name, "size"))
6907		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6908	trace_meta_dwords = param_num_val;
6909
6910	/* Choose meta data buffer */
6911	if (!trace_meta_dwords) {
6912		/* Dump doesn't include meta data */
6913		struct dbg_tools_user_data *dev_user_data =
6914			qed_dbg_get_user_data(p_hwfn);
6915
6916		if (!dev_user_data->mcp_trace_user_meta_buf)
6917			return DBG_STATUS_MCP_TRACE_NO_META;
6918
6919		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6920	} else {
6921		/* Dump includes meta data */
6922		meta_buf = dump_buf;
6923	}
6924
6925	/* Allocate meta data memory */
6926	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6927	if (status != DBG_STATUS_OK)
6928		return status;
6929
6930	status = qed_parse_mcp_trace_buf(p_hwfn,
6931					 trace_buf,
6932					 trace->size,
6933					 offset,
6934					 data_size,
6935					 results_buf ?
6936					 results_buf + results_offset :
6937					 NULL,
6938					 &results_buf_bytes);
6939	if (status != DBG_STATUS_OK)
6940		return status;
6941
6942	if (free_meta_data)
6943		qed_mcp_trace_free_meta_data(p_hwfn);
6944
6945	*parsed_results_bytes = results_offset + results_buf_bytes;
6946
6947	return DBG_STATUS_OK;
6948}
6949
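/* Note (illustrative): the MCP trace dump parsed above consists of a
 * "global_params" section, an "mcp_trace_data" section (a "size" param, a
 * struct mcp_trace header and the cyclic trace data) and an "mcp_trace_meta"
 * section. When the meta section size is zero, the meta image must have been
 * provided earlier via mcp_trace_user_meta_buf.
 */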
6950/* Parses a Reg FIFO dump buffer.
6951 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6952 * In any case, the required results buffer size is assigned to
6953 * parsed_results_bytes.
6954 * The parsing status is returned.
6955 */
6956static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6957					       char *results_buf,
6958					       u32 *parsed_results_bytes)
6959{
6960	const char *section_name, *param_name, *param_str_val;
6961	u32 param_num_val, num_section_params, num_elements;
6962	struct reg_fifo_element *elements;
6963	u8 i, j, err_val, vf_val;
6964	u32 results_offset = 0;
6965	char vf_str[4];
6966
6967	/* Read global_params section */
6968	dump_buf += qed_read_section_hdr(dump_buf,
6969					 &section_name, &num_section_params);
6970	if (strcmp(section_name, "global_params"))
6971		return DBG_STATUS_REG_FIFO_BAD_DATA;
6972
6973	/* Print global params */
6974	dump_buf += qed_print_section_params(dump_buf,
6975					     num_section_params,
6976					     results_buf, &results_offset);
6977
6978	/* Read reg_fifo_data section */
6979	dump_buf += qed_read_section_hdr(dump_buf,
6980					 &section_name, &num_section_params);
6981	if (strcmp(section_name, "reg_fifo_data"))
6982		return DBG_STATUS_REG_FIFO_BAD_DATA;
6983	dump_buf += qed_read_param(dump_buf,
6984				   &param_name, &param_str_val, &param_num_val);
6985	if (strcmp(param_name, "size"))
6986		return DBG_STATUS_REG_FIFO_BAD_DATA;
6987	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6988		return DBG_STATUS_REG_FIFO_BAD_DATA;
6989	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6990	elements = (struct reg_fifo_element *)dump_buf;
6991
6992	/* Decode elements */
6993	for (i = 0; i < num_elements; i++) {
6994		bool err_printed = false;
6995
6996		/* Discover if element belongs to a VF or a PF */
6997		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6998		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6999			sprintf(vf_str, "%s", "N/A");
7000		else
7001			sprintf(vf_str, "%d", vf_val);
7002
7003		/* Add parsed element to parsed buffer */
7004		results_offset +=
7005		    sprintf(qed_get_buf_ptr(results_buf,
7006					    results_offset),
7007			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
7008			    elements[i].data,
7009			    (u32)GET_FIELD(elements[i].data,
7010					   REG_FIFO_ELEMENT_ADDRESS) *
7011			    REG_FIFO_ELEMENT_ADDR_FACTOR,
7012			    s_access_strs[GET_FIELD(elements[i].data,
7013						    REG_FIFO_ELEMENT_ACCESS)],
7014			    (u32)GET_FIELD(elements[i].data,
7015					   REG_FIFO_ELEMENT_PF),
7016			    vf_str,
7017			    (u32)GET_FIELD(elements[i].data,
7018					   REG_FIFO_ELEMENT_PORT),
7019			    s_privilege_strs[GET_FIELD(elements[i].data,
7020						REG_FIFO_ELEMENT_PRIVILEGE)],
7021			    s_protection_strs[GET_FIELD(elements[i].data,
7022						REG_FIFO_ELEMENT_PROTECTION)],
7023			    s_master_strs[GET_FIELD(elements[i].data,
7024						REG_FIFO_ELEMENT_MASTER)]);
7025
7026		/* Print errors */
7027		for (j = 0,
7028		     err_val = GET_FIELD(elements[i].data,
7029					 REG_FIFO_ELEMENT_ERROR);
7030		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
7031		     j++, err_val >>= 1) {
7032			if (err_val & 0x1) {
7033				if (err_printed)
7034					results_offset +=
7035					    sprintf(qed_get_buf_ptr
7036						    (results_buf,
7037						     results_offset), ", ");
7038				results_offset +=
7039				    sprintf(qed_get_buf_ptr
7040					    (results_buf, results_offset), "%s",
7041					    s_reg_fifo_error_strs[j]);
7042				err_printed = true;
7043			}
7044		}
7045
7046		results_offset +=
7047		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
7048	}
7049
7050	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7051						  results_offset),
7052				  "fifo contained %d elements", num_elements);
7053
7054	/* Add 1 for string NULL termination */
7055	*parsed_results_bytes = results_offset + 1;
7056
7057	return DBG_STATUS_OK;
7058}
7059
7060static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
7061						  *element, char
7062						  *results_buf,
7063						  u32 *results_offset)
7064{
7065	const struct igu_fifo_addr_data *found_addr = NULL;
7066	u8 source, err_type, i, is_cleanup;
7067	char parsed_addr_data[32];
7068	char parsed_wr_data[256];
7069	u32 wr_data, prod_cons;
7070	bool is_wr_cmd, is_pf;
7071	u16 cmd_addr;
7072	u64 dword12;
7073
7074	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7075	 * FIFO element.
7076	 */
7077	dword12 = ((u64)element->dword2 << 32) | element->dword1;
7078	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7079	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7080	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7081	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7082	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7083
7084	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7085		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7086	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7087		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7088
7089	/* Find address data */
7090	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7091		const struct igu_fifo_addr_data *curr_addr =
7092			&s_igu_fifo_addr_data[i];
7093
7094		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7095		    curr_addr->end_addr)
7096			found_addr = curr_addr;
7097	}
7098
7099	if (!found_addr)
7100		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7101
7102	/* Prepare parsed address data */
7103	switch (found_addr->type) {
7104	case IGU_ADDR_TYPE_MSIX_MEM:
7105		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7106		break;
7107	case IGU_ADDR_TYPE_WRITE_INT_ACK:
7108	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7109		sprintf(parsed_addr_data,
7110			" SB = 0x%x", cmd_addr - found_addr->start_addr);
7111		break;
7112	default:
7113		parsed_addr_data[0] = '\0';
7114	}
7115
7116	if (!is_wr_cmd) {
7117		parsed_wr_data[0] = '\0';
7118		goto out;
7119	}
7120
7121	/* Prepare parsed write data */
7122	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7123	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7124	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7125
7126	if (source == IGU_SRC_ATTN) {
7127		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7128	} else {
7129		if (is_cleanup) {
7130			u8 cleanup_val, cleanup_type;
7131
7132			cleanup_val =
7133				GET_FIELD(wr_data,
7134					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7135			cleanup_type =
7136			    GET_FIELD(wr_data,
7137				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7138
7139			sprintf(parsed_wr_data,
7140				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7141				cleanup_val ? "set" : "clear",
7142				cleanup_type);
7143		} else {
7144			u8 update_flag, en_dis_int_for_sb, segment;
7145			u8 timer_mask;
7146
7147			update_flag = GET_FIELD(wr_data,
7148						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7149			en_dis_int_for_sb =
7150				GET_FIELD(wr_data,
7151					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7152			segment = GET_FIELD(wr_data,
7153					    IGU_FIFO_WR_DATA_SEGMENT);
7154			timer_mask = GET_FIELD(wr_data,
7155					       IGU_FIFO_WR_DATA_TIMER_MASK);
7156
7157			sprintf(parsed_wr_data,
7158				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7159				prod_cons,
7160				update_flag ? "update" : "nop",
7161				en_dis_int_for_sb ?
7162				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7163				"enable",
7164				segment ? "attn" : "regular",
7165				timer_mask);
7166		}
7167	}
7168out:
7169	/* Add parsed element to parsed buffer */
7170	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7171						   *results_offset),
7172				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7173				   element->dword2, element->dword1,
7174				   element->dword0,
7175				   is_pf ? "pf" : "vf",
7176				   GET_FIELD(element->dword0,
7177					     IGU_FIFO_ELEMENT_DWORD0_FID),
7178				   s_igu_fifo_source_strs[source],
7179				   is_wr_cmd ? "wr" : "rd",
7180				   cmd_addr,
7181				   (!is_pf && found_addr->vf_desc)
7182				   ? found_addr->vf_desc
7183				   : found_addr->desc,
7184				   parsed_addr_data,
7185				   parsed_wr_data,
7186				   s_igu_fifo_error_strs[err_type]);
7187
7188	return DBG_STATUS_OK;
7189}
7190
7191/* Parses an IGU FIFO dump buffer.
7192 * If results_buf is not NULL, the IGU FIFO results are printed to it.
7193 * In any case, the required results buffer size is assigned to
7194 * parsed_results_bytes.
7195 * The parsing status is returned.
7196 */
7197static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7198					       char *results_buf,
7199					       u32 *parsed_results_bytes)
7200{
7201	const char *section_name, *param_name, *param_str_val;
7202	u32 param_num_val, num_section_params, num_elements;
7203	struct igu_fifo_element *elements;
7204	enum dbg_status status;
7205	u32 results_offset = 0;
7206	u8 i;
7207
7208	/* Read global_params section */
7209	dump_buf += qed_read_section_hdr(dump_buf,
7210					 &section_name, &num_section_params);
7211	if (strcmp(section_name, "global_params"))
7212		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7213
7214	/* Print global params */
7215	dump_buf += qed_print_section_params(dump_buf,
7216					     num_section_params,
7217					     results_buf, &results_offset);
7218
7219	/* Read igu_fifo_data section */
7220	dump_buf += qed_read_section_hdr(dump_buf,
7221					 &section_name, &num_section_params);
7222	if (strcmp(section_name, "igu_fifo_data"))
7223		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7224	dump_buf += qed_read_param(dump_buf,
7225				   &param_name, &param_str_val, &param_num_val);
7226	if (strcmp(param_name, "size"))
7227		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7228	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7229		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7230	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7231	elements = (struct igu_fifo_element *)dump_buf;
7232
7233	/* Decode elements */
7234	for (i = 0; i < num_elements; i++) {
7235		status = qed_parse_igu_fifo_element(&elements[i],
7236						    results_buf,
7237						    &results_offset);
7238		if (status != DBG_STATUS_OK)
7239			return status;
7240	}
7241
7242	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7243						  results_offset),
7244				  "fifo contained %d elements", num_elements);
7245
7246	/* Add 1 for string NULL termination */
7247	*parsed_results_bytes = results_offset + 1;
7248
7249	return DBG_STATUS_OK;
7250}
7251
7252static enum dbg_status
7253qed_parse_protection_override_dump(u32 *dump_buf,
7254				   char *results_buf,
7255				   u32 *parsed_results_bytes)
7256{
7257	const char *section_name, *param_name, *param_str_val;
7258	u32 param_num_val, num_section_params, num_elements;
7259	struct protection_override_element *elements;
7260	u32 results_offset = 0;
7261	u8 i;
7262
7263	/* Read global_params section */
7264	dump_buf += qed_read_section_hdr(dump_buf,
7265					 &section_name, &num_section_params);
7266	if (strcmp(section_name, "global_params"))
7267		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7268
7269	/* Print global params */
7270	dump_buf += qed_print_section_params(dump_buf,
7271					     num_section_params,
7272					     results_buf, &results_offset);
7273
7274	/* Read protection_override_data section */
7275	dump_buf += qed_read_section_hdr(dump_buf,
7276					 &section_name, &num_section_params);
7277	if (strcmp(section_name, "protection_override_data"))
7278		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7279	dump_buf += qed_read_param(dump_buf,
7280				   &param_name, &param_str_val, &param_num_val);
7281	if (strcmp(param_name, "size"))
7282		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7283	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7284		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7285	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7286	elements = (struct protection_override_element *)dump_buf;
7287
7288	/* Decode elements */
7289	for (i = 0; i < num_elements; i++) {
7290		u32 address = GET_FIELD(elements[i].data,
7291					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7292			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7293
7294		results_offset +=
7295		    sprintf(qed_get_buf_ptr(results_buf,
7296					    results_offset),
7297			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7298			    i, address,
7299			    (u32)GET_FIELD(elements[i].data,
7300				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7301			    (u32)GET_FIELD(elements[i].data,
7302				      PROTECTION_OVERRIDE_ELEMENT_READ),
7303			    (u32)GET_FIELD(elements[i].data,
7304				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7305			    s_protection_strs[GET_FIELD(elements[i].data,
7306				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7307			    s_protection_strs[GET_FIELD(elements[i].data,
7308				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7309	}
7310
7311	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7312						  results_offset),
7313				  "protection override contained %d elements",
7314				  num_elements);
7315
7316	/* Add 1 for string NULL termination */
7317	*parsed_results_bytes = results_offset + 1;
7318
7319	return DBG_STATUS_OK;
7320}
7321
7322/* Parses a FW Asserts dump buffer.
7323 * If results_buf is not NULL, the FW Asserts results are printed to it.
7324 * In any case, the required results buffer size is assigned to
7325 * parsed_results_bytes.
7326 * The parsing status is returned.
7327 */
7328static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7329						 char *results_buf,
7330						 u32 *parsed_results_bytes)
7331{
7332	u32 num_section_params, param_num_val, i, results_offset = 0;
7333	const char *param_name, *param_str_val, *section_name;
7334	bool last_section_found = false;
7335
7336	*parsed_results_bytes = 0;
7337
7338	/* Read global_params section */
7339	dump_buf += qed_read_section_hdr(dump_buf,
7340					 &section_name, &num_section_params);
7341	if (strcmp(section_name, "global_params"))
7342		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7343
7344	/* Print global params */
7345	dump_buf += qed_print_section_params(dump_buf,
7346					     num_section_params,
7347					     results_buf, &results_offset);
7348
7349	while (!last_section_found) {
7350		dump_buf += qed_read_section_hdr(dump_buf,
7351						 &section_name,
7352						 &num_section_params);
7353		if (!strcmp(section_name, "fw_asserts")) {
7354			/* Extract params */
7355			const char *storm_letter = NULL;
7356			u32 storm_dump_size = 0;
7357
7358			for (i = 0; i < num_section_params; i++) {
7359				dump_buf += qed_read_param(dump_buf,
7360							   &param_name,
7361							   &param_str_val,
7362							   &param_num_val);
7363				if (!strcmp(param_name, "storm"))
7364					storm_letter = param_str_val;
7365				else if (!strcmp(param_name, "size"))
7366					storm_dump_size = param_num_val;
7367				else
7368					return
7369					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7370			}
7371
7372			if (!storm_letter || !storm_dump_size)
7373				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7374
7375			/* Print data */
7376			results_offset +=
7377			    sprintf(qed_get_buf_ptr(results_buf,
7378						    results_offset),
7379				    "\n%sSTORM_ASSERT: size=%d\n",
7380				    storm_letter, storm_dump_size);
7381			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7382				results_offset +=
7383				    sprintf(qed_get_buf_ptr(results_buf,
7384							    results_offset),
7385					    "%08x\n", *dump_buf);
7386		} else if (!strcmp(section_name, "last")) {
7387			last_section_found = true;
7388		} else {
7389			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7390		}
7391	}
7392
7393	/* Add 1 for string NULL termination */
7394	*parsed_results_bytes = results_offset + 1;
7395
7396	return DBG_STATUS_OK;
7397}
7398
7399/***************************** Public Functions *******************************/
7400
7401enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7402{
7403	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7404	u8 buf_id;
7405
7406	/* Convert binary data to debug arrays */
7407	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7408		s_user_dbg_arrays[buf_id].ptr =
7409			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7410		s_user_dbg_arrays[buf_id].size_in_dwords =
7411			BYTES_TO_DWORDS(buf_array[buf_id].length);
7412	}
7413
7414	return DBG_STATUS_OK;
7415}
7416
7417enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
7418{
7419	p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
7420					GFP_KERNEL);
7421	if (!p_hwfn->dbg_user_info)
7422		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7423
7424	return DBG_STATUS_OK;
7425}
7426
7427const char *qed_dbg_get_status_str(enum dbg_status status)
7428{
7429	return (status <
7430		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7431}
7432
7433enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7434						  u32 *dump_buf,
7435						  u32 num_dumped_dwords,
7436						  u32 *results_buf_size)
7437{
7438	u32 num_errors, num_warnings;
7439
7440	return qed_parse_idle_chk_dump(dump_buf,
7441				       num_dumped_dwords,
7442				       NULL,
7443				       results_buf_size,
7444				       &num_errors, &num_warnings);
7445}
7446
7447enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7448					   u32 *dump_buf,
7449					   u32 num_dumped_dwords,
7450					   char *results_buf,
7451					   u32 *num_errors,
7452					   u32 *num_warnings)
7453{
7454	u32 parsed_buf_size;
7455
7456	return qed_parse_idle_chk_dump(dump_buf,
7457				       num_dumped_dwords,
7458				       results_buf,
7459				       &parsed_buf_size,
7460				       num_errors, num_warnings);
7461}
7462
7463void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7464				     const u32 *meta_buf)
7465{
7466	struct dbg_tools_user_data *dev_user_data =
7467		qed_dbg_get_user_data(p_hwfn);
7468
7469	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7470}
7471
7472enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7473						   u32 *dump_buf,
7474						   u32 num_dumped_dwords,
7475						   u32 *results_buf_size)
7476{
7477	return qed_parse_mcp_trace_dump(p_hwfn,
7478					dump_buf, NULL, results_buf_size, true);
7479}
7480
7481enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7482					    u32 *dump_buf,
7483					    u32 num_dumped_dwords,
7484					    char *results_buf)
7485{
7486	u32 parsed_buf_size;
7487
7488	return qed_parse_mcp_trace_dump(p_hwfn,
7489					dump_buf,
7490					results_buf, &parsed_buf_size, true);
7491}
7492
7493enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7494						 u32 *dump_buf,
7495						 char *results_buf)
7496{
7497	u32 parsed_buf_size;
7498
7499	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7500					&parsed_buf_size, false);
7501}
7502
7503enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7504					 u8 *dump_buf,
7505					 u32 num_dumped_bytes,
7506					 char *results_buf)
7507{
7508	u32 parsed_results_bytes;
7509
7510	return qed_parse_mcp_trace_buf(p_hwfn,
7511				       dump_buf,
7512				       num_dumped_bytes,
7513				       0,
7514				       num_dumped_bytes,
7515				       results_buf, &parsed_results_bytes);
7516}
7517
7518/* Frees the specified MCP Trace meta data */
7519void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7520{
7521	struct dbg_tools_user_data *dev_user_data;
7522	struct mcp_trace_meta *meta;
7523	u32 i;
7524
7525	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7526	meta = &dev_user_data->mcp_trace_meta;
7527	if (!meta->is_allocated)
7528		return;
7529
7530	/* Release modules */
7531	if (meta->modules) {
7532		for (i = 0; i < meta->modules_num; i++)
7533			kfree(meta->modules[i]);
7534		kfree(meta->modules);
7535	}
7536
7537	/* Release formats */
7538	if (meta->formats) {
7539		for (i = 0; i < meta->formats_num; i++)
7540			kfree(meta->formats[i].format_str);
7541		kfree(meta->formats);
7542	}
7543
7544	meta->is_allocated = false;
7545}
7546
7547enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7548						  u32 *dump_buf,
7549						  u32 num_dumped_dwords,
7550						  u32 *results_buf_size)
7551{
7552	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7553}
7554
7555enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7556					   u32 *dump_buf,
7557					   u32 num_dumped_dwords,
7558					   char *results_buf)
7559{
7560	u32 parsed_buf_size;
7561
7562	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7563}
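
/* Editor's illustrative sketch (not part of the driver): the Reg FIFO,
 * IGU FIFO, protection override and FW asserts parsers above all follow the
 * same two-pass pattern - query the formatted size with a NULL buffer, then
 * allocate and print into it. The helper name below and its error handling
 * are assumptions of this example.
 */
static enum dbg_status __maybe_unused
example_format_reg_fifo(struct qed_hwfn *p_hwfn,
			u32 *dump_buf,
			u32 num_dumped_dwords, char **out_text)
{
	enum dbg_status rc;
	u32 text_bytes;
	char *text;

	/* Pass 1: learn the required results buffer size */
	rc = qed_get_reg_fifo_results_buf_size(p_hwfn, dump_buf,
					       num_dumped_dwords, &text_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	text = vzalloc(text_bytes);
	if (!text)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Pass 2: print the parsed elements into the allocated buffer */
	rc = qed_print_reg_fifo_results(p_hwfn, dump_buf,
					num_dumped_dwords, text);
	if (rc != DBG_STATUS_OK) {
		vfree(text);
		return rc;
	}

	*out_text = text;
	return DBG_STATUS_OK;
}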
7564
7565enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7566						  u32 *dump_buf,
7567						  u32 num_dumped_dwords,
7568						  u32 *results_buf_size)
7569{
7570	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7571}
7572
7573enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7574					   u32 *dump_buf,
7575					   u32 num_dumped_dwords,
7576					   char *results_buf)
7577{
7578	u32 parsed_buf_size;
7579
7580	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7581}
7582
7583enum dbg_status
7584qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7585					     u32 *dump_buf,
7586					     u32 num_dumped_dwords,
7587					     u32 *results_buf_size)
7588{
7589	return qed_parse_protection_override_dump(dump_buf,
7590						  NULL, results_buf_size);
7591}
7592
7593enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7594						      u32 *dump_buf,
7595						      u32 num_dumped_dwords,
7596						      char *results_buf)
7597{
7598	u32 parsed_buf_size;
7599
7600	return qed_parse_protection_override_dump(dump_buf,
7601						  results_buf,
7602						  &parsed_buf_size);
7603}
7604
7605enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7606						    u32 *dump_buf,
7607						    u32 num_dumped_dwords,
7608						    u32 *results_buf_size)
7609{
7610	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7611}
7612
7613enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7614					     u32 *dump_buf,
7615					     u32 num_dumped_dwords,
7616					     char *results_buf)
7617{
7618	u32 parsed_buf_size;
7619
7620	return qed_parse_fw_asserts_dump(dump_buf,
7621					 results_buf, &parsed_buf_size);
7622}
7623
7624enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7625				   struct dbg_attn_block_result *results)
7626{
7627	struct user_dbg_array *block_attn, *pstrings;
7628	const u32 *block_attn_name_offsets;
7629	enum dbg_attn_type attn_type;
7630	const char *block_name;
7631	u8 num_regs, i, j;
7632
7633	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7634	attn_type = (enum dbg_attn_type)
7635		    GET_FIELD(results->data,
7636			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7637	block_name = s_block_info_arr[results->block_id].name;
7638
7639	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7640	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7641	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7642		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7643
7644	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7645	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7646
7647	/* Go over registers with a non-zero attention status */
7648	for (i = 0; i < num_regs; i++) {
7649		struct dbg_attn_bit_mapping *bit_mapping;
7650		struct dbg_attn_reg_result *reg_result;
7651		u8 num_reg_attn, bit_idx = 0;
7652
7653		reg_result = &results->reg_results[i];
7654		num_reg_attn = GET_FIELD(reg_result->data,
7655					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7656		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7657		bit_mapping = &((struct dbg_attn_bit_mapping *)
7658				block_attn->ptr)[reg_result->block_attn_offset];
7659
7660		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7661
7662		/* Go over attention status bits */
7663		for (j = 0; j < num_reg_attn; j++) {
7664			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7665						     DBG_ATTN_BIT_MAPPING_VAL);
7666			const char *attn_name, *attn_type_str, *masked_str;
7667			u32 attn_name_offset, sts_addr;
7668
7669			/* Check if bit mask should be advanced (due to unused
7670			 * bits).
7671			 */
7672			if (GET_FIELD(bit_mapping[j].data,
7673				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7674				bit_idx += (u8)attn_idx_val;
7675				continue;
7676			}
7677
7678			/* Check current bit index */
7679			if (!(reg_result->sts_val & BIT(bit_idx))) {
7680				bit_idx++;
7681				continue;
7682			}
7683
7684			/* Find attention name */
7685			attn_name_offset =
7686				block_attn_name_offsets[attn_idx_val];
7687			attn_name = &((const char *)
7688				      pstrings->ptr)[attn_name_offset];
7689			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7690					"Interrupt" : "Parity";
7691			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7692				     " [masked]" : "";
7693			sts_addr = GET_FIELD(reg_result->data,
7694					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7695			DP_NOTICE(p_hwfn,
7696				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7697				  block_name, attn_type_str, attn_name,
7698				  sts_addr, bit_idx, masked_str);
7699
7700			bit_idx++;
7701		}
7702	}
7703
7704	return DBG_STATUS_OK;
7705}
7706
7707/* Wrapper for unifying the idle_chk and mcp_trace api */
7708static enum dbg_status
7709qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7710				   u32 *dump_buf,
7711				   u32 num_dumped_dwords,
7712				   char *results_buf)
7713{
7714	u32 num_errors, num_warnings;
7715
7716	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7717					  results_buf, &num_errors,
7718					  &num_warnings);
7719}
7720
7721/* Feature meta data lookup table */
7722static struct {
7723	char *name;
7724	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7725				    struct qed_ptt *p_ptt, u32 *size);
7726	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7727					struct qed_ptt *p_ptt, u32 *dump_buf,
7728					u32 buf_size, u32 *dumped_dwords);
7729	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7730					 u32 *dump_buf, u32 num_dumped_dwords,
7731					 char *results_buf);
7732	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7733					    u32 *dump_buf,
7734					    u32 num_dumped_dwords,
7735					    u32 *results_buf_size);
7736} qed_features_lookup[] = {
7737	{
7738	"grc", qed_dbg_grc_get_dump_buf_size,
7739		    qed_dbg_grc_dump, NULL, NULL}, {
7740	"idle_chk",
7741		    qed_dbg_idle_chk_get_dump_buf_size,
7742		    qed_dbg_idle_chk_dump,
7743		    qed_print_idle_chk_results_wrapper,
7744		    qed_get_idle_chk_results_buf_size}, {
7745	"mcp_trace",
7746		    qed_dbg_mcp_trace_get_dump_buf_size,
7747		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7748		    qed_get_mcp_trace_results_buf_size}, {
7749	"reg_fifo",
7750		    qed_dbg_reg_fifo_get_dump_buf_size,
7751		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7752		    qed_get_reg_fifo_results_buf_size}, {
7753	"igu_fifo",
7754		    qed_dbg_igu_fifo_get_dump_buf_size,
7755		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7756		    qed_get_igu_fifo_results_buf_size}, {
7757	"protection_override",
7758		    qed_dbg_protection_override_get_dump_buf_size,
7759		    qed_dbg_protection_override_dump,
7760		    qed_print_protection_override_results,
7761		    qed_get_protection_override_results_buf_size}, {
7762	"fw_asserts",
7763		    qed_dbg_fw_asserts_get_dump_buf_size,
7764		    qed_dbg_fw_asserts_dump,
7765		    qed_print_fw_asserts_results,
7766		    qed_get_fw_asserts_results_buf_size},};
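
/* Editor's note: qed_features_lookup[] is indexed by enum qed_dbg_features,
 * and a NULL print_results/results_buf_size pair (as for "grc") means the
 * feature has no text-formatting stage; format_feature() below relies on
 * exactly that. A minimal illustrative helper (the name is an assumption of
 * this example):
 */
static bool __maybe_unused
example_feature_has_text_output(enum qed_dbg_features feature_idx)
{
	return qed_features_lookup[feature_idx].results_buf_size != NULL;
}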
7767
7768static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7769{
7770	u32 i, precision = 80;
7771
7772	if (!p_text_buf)
7773		return;
7774
7775	pr_notice("\n%.*s", precision, p_text_buf);
7776	for (i = precision; i < text_size; i += precision)
7777		pr_cont("%.*s", precision, p_text_buf + i);
7778	pr_cont("\n");
7779}
7780
7781#define QED_RESULTS_BUF_MIN_SIZE 16
7782/* Generic function for decoding debug feature info */
7783static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7784				      enum qed_dbg_features feature_idx)
7785{
7786	struct qed_dbg_feature *feature =
7787	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7788	u32 text_size_bytes, null_char_pos, i;
7789	enum dbg_status rc;
7790	char *text_buf;
7791
7792	/* Check if feature supports formatting capability */
7793	if (!qed_features_lookup[feature_idx].results_buf_size)
7794		return DBG_STATUS_OK;
7795
7796	/* Obtain size of formatted output */
7797	rc = qed_features_lookup[feature_idx].
7798		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7799				 feature->dumped_dwords, &text_size_bytes);
7800	if (rc != DBG_STATUS_OK)
7801		return rc;
7802
7803	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
7804	null_char_pos = text_size_bytes - 1;
7805	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7806
7807	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7808		DP_NOTICE(p_hwfn->cdev,
7809			  "formatted size of feature was too small %d. Aborting\n",
7810			  text_size_bytes);
7811		return DBG_STATUS_INVALID_ARGS;
7812	}
7813
7814	/* Allocate temp text buf */
7815	text_buf = vzalloc(text_size_bytes);
7816	if (!text_buf)
7817		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7818
7819	/* Decode feature opcodes to string on temp buf */
7820	rc = qed_features_lookup[feature_idx].
7821		print_results(p_hwfn, (u32 *)feature->dump_buf,
7822			      feature->dumped_dwords, text_buf);
7823	if (rc != DBG_STATUS_OK) {
7824		vfree(text_buf);
7825		return rc;
7826	}
7827
7828	/* Replace the original null character with a '\n' character.
7829	 * The bytes that were added as a result of the dword alignment are also
7830	 * padded with '\n' characters.
7831	 */
7832	for (i = null_char_pos; i < text_size_bytes; i++)
7833		text_buf[i] = '\n';
7834
7835	/* Dump printable feature to log */
7836	if (p_hwfn->cdev->dbg_params.print_data)
7837		qed_dbg_print_feature(text_buf, text_size_bytes);
7838
7839	/* Free the old dump_buf and point the dump_buf to the newly allocated
7840	 * and formatted text buffer.
7841	 */
7842	vfree(feature->dump_buf);
7843	feature->dump_buf = text_buf;
7844	feature->buf_size = text_size_bytes;
7845	feature->dumped_dwords = text_size_bytes / 4;
7846	return rc;
7847}
7848
7849/* Generic function for performing the dump of a debug feature. */
7850static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7851				    struct qed_ptt *p_ptt,
7852				    enum qed_dbg_features feature_idx)
7853{
7854	struct qed_dbg_feature *feature =
7855	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7856	u32 buf_size_dwords;
7857	enum dbg_status rc;
7858
7859	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7860		  qed_features_lookup[feature_idx].name);
7861
7862	/* If dump_buf was already allocated, free it (this can happen if a dump
7863	 * was requested but the file was never read).
7864	 * We can't reuse the buffer as-is since the size may have changed.
7865	 */
7866	if (feature->dump_buf) {
7867		vfree(feature->dump_buf);
7868		feature->dump_buf = NULL;
7869	}
7870
7871	/* Get buffer size from hsi, allocate accordingly, and perform the
7872	 * dump.
7873	 */
7874	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7875						       &buf_size_dwords);
7876	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7877		return rc;
7878	feature->buf_size = buf_size_dwords * sizeof(u32);
7879	feature->dump_buf = vmalloc(feature->buf_size);
7880	if (!feature->dump_buf)
7881		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7882
7883	rc = qed_features_lookup[feature_idx].
7884		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7885			     feature->buf_size / sizeof(u32),
7886			     &feature->dumped_dwords);
7887
7888	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7889	 * In this case the buffer holds valid binary data, but we won't be able
7890	 * to parse it (since parsing relies on data in NVRAM which is only
7891	 * accessible when the MFW is responsive). Skip the formatting but return
7892	 * success so that binary data is provided.
7893	 */
7894	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7895		return DBG_STATUS_OK;
7896
7897	if (rc != DBG_STATUS_OK)
7898		return rc;
7899
7900	/* Format output */
7901	rc = format_feature(p_hwfn, feature_idx);
7902	return rc;
7903}
7904
7905int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7906{
7907	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7908}
7909
7910int qed_dbg_grc_size(struct qed_dev *cdev)
7911{
7912	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7913}
7914
7915int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7916{
7917	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7918			       num_dumped_bytes);
7919}
7920
7921int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7922{
7923	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7924}
7925
7926int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7927{
7928	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7929			       num_dumped_bytes);
7930}
7931
7932int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7933{
7934	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7935}
7936
7937int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7938{
7939	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7940			       num_dumped_bytes);
7941}
7942
7943int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7944{
7945	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7946}
7947
7948static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7949				    enum qed_nvm_images image_id, u32 *length)
7950{
7951	struct qed_nvm_image_att image_att;
7952	int rc;
7953
7954	*length = 0;
7955	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7956	if (rc)
7957		return rc;
7958
7959	*length = image_att.length;
7960
7961	return rc;
7962}
7963
7964static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7965			     u32 *num_dumped_bytes,
7966			     enum qed_nvm_images image_id)
7967{
7968	struct qed_hwfn *p_hwfn =
7969		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7970	u32 len_rounded, i;
7971	__be32 val;
7972	int rc;
7973
7974	*num_dumped_bytes = 0;
7975	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7976	if (rc)
7977		return rc;
7978
7979	DP_NOTICE(p_hwfn->cdev,
7980		  "Collecting a debug feature [\"nvram image %d\"]\n",
7981		  image_id);
7982
7983	len_rounded = roundup(len_rounded, sizeof(u32));
7984	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7985	if (rc)
7986		return rc;
7987
7988	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7989	if (image_id != QED_NVM_IMAGE_NVM_META)
7990		for (i = 0; i < len_rounded; i += 4) {
7991			val = cpu_to_be32(*(u32 *)(buffer + i));
7992			*(u32 *)(buffer + i) = val;
7993		}
7994
7995	*num_dumped_bytes = len_rounded;
7996
7997	return rc;
7998}
7999
8000int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
8001				u32 *num_dumped_bytes)
8002{
8003	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
8004			       num_dumped_bytes);
8005}
8006
8007int qed_dbg_protection_override_size(struct qed_dev *cdev)
8008{
8009	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
8010}
8011
8012int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
8013		       u32 *num_dumped_bytes)
8014{
8015	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
8016			       num_dumped_bytes);
8017}
8018
8019int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
8020{
8021	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
8022}
8023
8024int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
8025		      u32 *num_dumped_bytes)
8026{
8027	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
8028			       num_dumped_bytes);
8029}
8030
8031int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
8032{
8033	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
8034}
8035
8036/* Defines the number of bytes allocated for recording the length of a
8037 * debugfs feature buffer.
8038 */
8039#define REGDUMP_HEADER_SIZE			sizeof(u32)
8040#define REGDUMP_HEADER_FEATURE_SHIFT		24
8041#define REGDUMP_HEADER_ENGINE_SHIFT		31
8042#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
8043enum debug_print_features {
8044	OLD_MODE = 0,
8045	IDLE_CHK = 1,
8046	GRC_DUMP = 2,
8047	MCP_TRACE = 3,
8048	REG_FIFO = 4,
8049	PROTECTION_OVERRIDE = 5,
8050	IGU_FIFO = 6,
8051	PHY = 7,
8052	FW_ASSERTS = 8,
8053	NVM_CFG1 = 9,
8054	DEFAULT_CFG = 10,
8055	NVM_META = 11,
8056};
8057
8058static u32 qed_calc_regdump_header(enum debug_print_features feature,
8059				   int engine, u32 feature_size, u8 omit_engine)
8060{
8061	/* Insert the engine, feature and omit_engine fields into the header
8062	 * and combine them with the feature size.
8063	 */
8064	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
8065	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
8066	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
8067}
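
/* Editor's illustrative sketch (not part of the driver): unpacking a header
 * word produced by qed_calc_regdump_header(). The field widths used here
 * (a 24-bit size and a feature field in bits 24..29) are inferred from the
 * shift macros above and are an assumption of this example.
 */
static void __maybe_unused example_decode_regdump_header(u32 hdr,
							 u32 *feature,
							 u32 *engine,
							 u32 *omit_engine,
							 u32 *size)
{
	*size = hdr & 0xffffff;		/* feature payload size, in bytes */
	*feature = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
	*omit_engine = (hdr >> REGDUMP_HEADER_OMIT_ENGINE_SHIFT) & 0x1;
	*engine = (hdr >> REGDUMP_HEADER_ENGINE_SHIFT) & 0x1;
}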
8068
8069int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
8070{
8071	u8 cur_engine, omit_engine = 0, org_engine;
8072	struct qed_hwfn *p_hwfn =
8073		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8074	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
8075	int grc_params[MAX_DBG_GRC_PARAMS], i;
8076	u32 offset = 0, feature_size;
8077	int rc;
8078
8079	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8080		grc_params[i] = dev_data->grc.param_val[i];
8081
8082	if (cdev->num_hwfns == 1)
8083		omit_engine = 1;
8084
8085	org_engine = qed_get_debug_engine(cdev);
8086	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8087		/* Collect idle_chks and grcDump for each hw function */
8088		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8089			   "obtaining idle_chk and grcdump for current engine\n");
8090		qed_set_debug_engine(cdev, cur_engine);
8091
8092		/* First idle_chk */
8093		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8094				      REGDUMP_HEADER_SIZE, &feature_size);
8095		if (!rc) {
8096			*(u32 *)((u8 *)buffer + offset) =
8097			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8098						    feature_size, omit_engine);
8099			offset += (feature_size + REGDUMP_HEADER_SIZE);
8100		} else {
8101			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8102		}
8103
8104		/* Second idle_chk */
8105		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8106				      REGDUMP_HEADER_SIZE, &feature_size);
8107		if (!rc) {
8108			*(u32 *)((u8 *)buffer + offset) =
8109			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8110						    feature_size, omit_engine);
8111			offset += (feature_size + REGDUMP_HEADER_SIZE);
8112		} else {
8113			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8114		}
8115
8116		/* reg_fifo dump */
8117		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8118				      REGDUMP_HEADER_SIZE, &feature_size);
8119		if (!rc) {
8120			*(u32 *)((u8 *)buffer + offset) =
8121			    qed_calc_regdump_header(REG_FIFO, cur_engine,
8122						    feature_size, omit_engine);
8123			offset += (feature_size + REGDUMP_HEADER_SIZE);
8124		} else {
8125			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8126		}
8127
8128		/* igu_fifo dump */
8129		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8130				      REGDUMP_HEADER_SIZE, &feature_size);
8131		if (!rc) {
8132			*(u32 *)((u8 *)buffer + offset) =
8133			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
8134						    feature_size, omit_engine);
8135			offset += (feature_size + REGDUMP_HEADER_SIZE);
8136		} else {
8137			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
8138		}
8139
8140		/* protection_override dump */
8141		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8142						 REGDUMP_HEADER_SIZE,
8143						 &feature_size);
8144		if (!rc) {
8145			*(u32 *)((u8 *)buffer + offset) =
8146			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
8147						    cur_engine,
8148						    feature_size, omit_engine);
8149			offset += (feature_size + REGDUMP_HEADER_SIZE);
8150		} else {
8151			DP_ERR(cdev,
8152			       "qed_dbg_protection_override failed. rc = %d\n",
8153			       rc);
8154		}
8155
8156		/* fw_asserts dump */
8157		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8158					REGDUMP_HEADER_SIZE, &feature_size);
8159		if (!rc) {
8160			*(u32 *)((u8 *)buffer + offset) =
8161			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
8162						    feature_size, omit_engine);
8163			offset += (feature_size + REGDUMP_HEADER_SIZE);
8164		} else {
8165			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8166			       rc);
8167		}
8168
8169		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8170			dev_data->grc.param_val[i] = grc_params[i];
8171
8172		/* GRC dump - must be last because when the MCP is stuck it would
8173		 * clutter idle_chk, reg_fifo, etc.
8174		 */
8175		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8176				 REGDUMP_HEADER_SIZE, &feature_size);
8177		if (!rc) {
8178			*(u32 *)((u8 *)buffer + offset) =
8179			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
8180						    feature_size, omit_engine);
8181			offset += (feature_size + REGDUMP_HEADER_SIZE);
8182		} else {
8183			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
8184		}
8185	}
8186
8187	qed_set_debug_engine(cdev, org_engine);
8188	/* mcp_trace */
8189	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8190			       REGDUMP_HEADER_SIZE, &feature_size);
8191	if (!rc) {
8192		*(u32 *)((u8 *)buffer + offset) =
8193		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
8194					    feature_size, omit_engine);
8195		offset += (feature_size + REGDUMP_HEADER_SIZE);
8196	} else {
8197		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8198	}
8199
8200	/* nvm cfg1 */
8201	rc = qed_dbg_nvm_image(cdev,
8202			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8203			       &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8204	if (!rc) {
8205		*(u32 *)((u8 *)buffer + offset) =
8206		    qed_calc_regdump_header(NVM_CFG1, cur_engine,
8207					    feature_size, omit_engine);
8208		offset += (feature_size + REGDUMP_HEADER_SIZE);
8209	} else if (rc != -ENOENT) {
8210		DP_ERR(cdev,
8211		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8212		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8213	}
8214
8215	/* nvm default */
8216	rc = qed_dbg_nvm_image(cdev,
8217			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8218			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8219	if (!rc) {
8220		*(u32 *)((u8 *)buffer + offset) =
8221		    qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8222					    feature_size, omit_engine);
8223		offset += (feature_size + REGDUMP_HEADER_SIZE);
8224	} else if (rc != -ENOENT) {
8225		DP_ERR(cdev,
8226		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8227		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8228		       rc);
8229	}
8230
8231	/* nvm meta */
8232	rc = qed_dbg_nvm_image(cdev,
8233			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8234			       &feature_size, QED_NVM_IMAGE_NVM_META);
8235	if (!rc) {
8236		*(u32 *)((u8 *)buffer + offset) =
8237		    qed_calc_regdump_header(NVM_META, cur_engine,
8238					    feature_size, omit_engine);
8239		offset += (feature_size + REGDUMP_HEADER_SIZE);
8240	} else if (rc != -ENOENT) {
8241		DP_ERR(cdev,
8242		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8243		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8244	}
8245
8246	return 0;
8247}
8248
8249int qed_dbg_all_data_size(struct qed_dev *cdev)
8250{
8251	struct qed_hwfn *p_hwfn =
8252		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8253	u32 regs_len = 0, image_len = 0;
8254	u8 cur_engine, org_engine;
8255
8256	org_engine = qed_get_debug_engine(cdev);
8257	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8258		/* Engine specific */
8259		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8260			   "calculating idle_chk and grcdump register length for current engine\n");
8261		qed_set_debug_engine(cdev, cur_engine);
8262		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8263			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8264			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8265			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8266			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8267			    REGDUMP_HEADER_SIZE +
8268			    qed_dbg_protection_override_size(cdev) +
8269			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8270	}
8271
8272	qed_set_debug_engine(cdev, org_engine);
8273
8274	/* Engine common */
8275	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8276	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8277	if (image_len)
8278		regs_len += REGDUMP_HEADER_SIZE + image_len;
8279	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8280	if (image_len)
8281		regs_len += REGDUMP_HEADER_SIZE + image_len;
8282	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8283	if (image_len)
8284		regs_len += REGDUMP_HEADER_SIZE + image_len;
8285
8286	return regs_len;
8287}
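
/* Editor's illustrative sketch (not part of the driver) of how a consumer
 * such as the qed debugfs code might use the two functions above: size the
 * combined dump, allocate, collect, then walk the per-feature REGDUMP
 * headers. The helper name, the 24-bit size mask and the stop-on-empty
 * check are assumptions of this example.
 */
static int __maybe_unused example_collect_all_data(struct qed_dev *cdev)
{
	u32 total_size, offset = 0;
	void *buf;
	int rc;

	total_size = qed_dbg_all_data_size(cdev);
	buf = vzalloc(total_size);
	if (!buf)
		return -ENOMEM;

	rc = qed_dbg_all_data(cdev, buf);
	if (rc) {
		vfree(buf);
		return rc;
	}

	/* Each chunk is a u32 header followed by the feature payload */
	while (offset + REGDUMP_HEADER_SIZE <= total_size) {
		u32 hdr = *(u32 *)((u8 *)buf + offset);
		u32 size = hdr & 0xffffff;

		if (!size)
			break;
		/* ... hand the chunk off to whatever consumes it ... */
		offset += REGDUMP_HEADER_SIZE + size;
	}

	vfree(buf);
	return 0;
}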
8288
8289int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8290		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8291{
8292	struct qed_hwfn *p_hwfn =
8293		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8294	struct qed_dbg_feature *qed_feature =
8295		&cdev->dbg_params.features[feature];
8296	enum dbg_status dbg_rc;
8297	struct qed_ptt *p_ptt;
8298	int rc = 0;
8299
8300	/* Acquire ptt */
8301	p_ptt = qed_ptt_acquire(p_hwfn);
8302	if (!p_ptt)
8303		return -EINVAL;
8304
8305	/* Get dump */
8306	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8307	if (dbg_rc != DBG_STATUS_OK) {
8308		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8309			   qed_dbg_get_status_str(dbg_rc));
8310		*num_dumped_bytes = 0;
8311		rc = -EINVAL;
8312		goto out;
8313	}
8314
8315	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8316		   "copying debugfs feature to external buffer\n");
8317	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8318	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
8319			    4;
8320
8321out:
8322	qed_ptt_release(p_hwfn, p_ptt);
8323	return rc;
8324}
8325
8326int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8327{
8328	struct qed_hwfn *p_hwfn =
8329		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8330	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8331	struct qed_dbg_feature *qed_feature =
8332		&cdev->dbg_params.features[feature];
8333	u32 buf_size_dwords;
8334	enum dbg_status rc;
8335
8336	if (!p_ptt)
8337		return -EINVAL;
8338
8339	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8340						   &buf_size_dwords);
8341	if (rc != DBG_STATUS_OK)
8342		buf_size_dwords = 0;
8343
8344	qed_ptt_release(p_hwfn, p_ptt);
8345	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8346	return qed_feature->buf_size;
8347}
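
/* Editor's illustrative sketch (not part of the driver) of the per-feature
 * flow: qed_dbg_feature_size() sizes the buffer, qed_dbg_feature() fills it
 * with the (possibly formatted) dump. The wrapper name and error handling
 * are assumptions of this example.
 */
static int __maybe_unused example_dump_one_feature(struct qed_dev *cdev,
						   enum qed_dbg_features feature)
{
	u32 dumped_bytes = 0;
	void *buf;
	int size, rc;

	size = qed_dbg_feature_size(cdev, feature);
	if (size <= 0)
		return -EINVAL;

	buf = vzalloc(size);
	if (!buf)
		return -ENOMEM;

	rc = qed_dbg_feature(cdev, buf, feature, &dumped_bytes);
	if (!rc)
		pr_info("qed: dumped %u bytes for feature %d\n",
			dumped_bytes, feature);

	vfree(buf);
	return rc;
}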
8348
8349u8 qed_get_debug_engine(struct qed_dev *cdev)
8350{
8351	return cdev->dbg_params.engine_for_debug;
8352}
8353
8354void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8355{
8356	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8357		   engine_number);
8358	cdev->dbg_params.engine_for_debug = engine_number;
8359}
8360
8361void qed_dbg_pf_init(struct qed_dev *cdev)
8362{
8363	const u8 *dbg_values;
8364
8365	/* Debug values are after init values.
8366	 * The offset is the first dword of the file.
8367	 */
8368	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8369	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8370	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8371}
8372
8373void qed_dbg_pf_exit(struct qed_dev *cdev)
8374{
8375	struct qed_dbg_feature *feature = NULL;
8376	enum qed_dbg_features feature_idx;
8377
8378	/* Debug features' buffers may be allocated if debug feature was used
8379	 * but dump wasn't called.
8380	 */
8381	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8382		feature = &cdev->dbg_params.features[feature_idx];
8383		if (feature->dump_buf) {
8384			vfree(feature->dump_buf);
8385			feature->dump_buf = NULL;
8386		}
8387	}
8388}