1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2018-2024 Intel Corporation
4 */
5#include <linux/firmware.h>
6#include "iwl-drv.h"
7#include "iwl-trans.h"
8#include "iwl-dbg-tlv.h"
9#include "fw/dbg.h"
10#include "fw/runtime.h"
11
12/**
13 * enum iwl_dbg_tlv_type - debug TLV types
14 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
15 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
16 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
17 * @IWL_DBG_TLV_TYPE_REGION: region TLV
18 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
19 * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV
20 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
21 */
22enum iwl_dbg_tlv_type {
23 IWL_DBG_TLV_TYPE_DEBUG_INFO =
24 IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
25 IWL_DBG_TLV_TYPE_BUF_ALLOC,
26 IWL_DBG_TLV_TYPE_HCMD,
27 IWL_DBG_TLV_TYPE_REGION,
28 IWL_DBG_TLV_TYPE_TRIGGER,
29 IWL_DBG_TLV_TYPE_CONF_SET,
30 IWL_DBG_TLV_TYPE_NUM,
31};
32
33/**
34 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
35 * @min_ver: min version supported
36 * @max_ver: max version supported
37 */
38struct iwl_dbg_tlv_ver_data {
39 int min_ver;
40 int max_ver;
41};
42
43/**
44 * struct iwl_dbg_tlv_timer_node - timer node struct
45 * @list: list of &struct iwl_dbg_tlv_timer_node
46 * @timer: timer
47 * @fwrt: &struct iwl_fw_runtime
48 * @tlv: TLV attached to the timer node
49 */
50struct iwl_dbg_tlv_timer_node {
51 struct list_head list;
52 struct timer_list timer;
53 struct iwl_fw_runtime *fwrt;
54 struct iwl_ucode_tlv *tlv;
55};
56
57static const struct iwl_dbg_tlv_ver_data
58dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
59 [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
60 [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
61 [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
62 [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,},
63 [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
64 [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
65};
66
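/* Copy @tlv (header plus payload) into a newly allocated list node and
 * append it to @list. Returns 0 on success or -ENOMEM.
 */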
67static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
68 struct list_head *list)
69{
70 u32 len = le32_to_cpu(tlv->length);
71 struct iwl_dbg_tlv_node *node;
72
73 node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
74 if (!node)
75 return -ENOMEM;
76
77 memcpy(&node->tlv, tlv, sizeof(node->tlv));
78 memcpy(node->tlv.data, tlv->data, len);
79 list_add_tail(&node->list, list);
80
81 return 0;
82}
83
84static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
85{
86 const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
87 u32 type = le32_to_cpu(tlv->type);
88 u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
89 u32 ver = le32_to_cpu(hdr->version);
90
91 if (ver < dbg_ver_table[tlv_idx].min_ver ||
92 ver > dbg_ver_table[tlv_idx].max_ver)
93 return false;
94
95 return true;
96}
97
98static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
99 const struct iwl_ucode_tlv *tlv)
100{
101 const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;
102
103 if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
104 return -EINVAL;
105
106 IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
107 debug_info->debug_cfg_name);
108
109 return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
110}
111
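/* Validate a buffer allocation TLV (location, allocation ID and their
 * allowed combinations) and cache it in trans->dbg.fw_mon_cfg[].
 */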
112static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
113 const struct iwl_ucode_tlv *tlv)
114{
115 const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
116 u32 buf_location;
117 u32 alloc_id;
118
119 if (le32_to_cpu(tlv->length) != sizeof(*alloc))
120 return -EINVAL;
121
122 buf_location = le32_to_cpu(alloc->buf_location);
123 alloc_id = le32_to_cpu(alloc->alloc_id);
124
125 if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
126 buf_location >= IWL_FW_INI_LOCATION_NUM)
127 goto err;
128
129 if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
130 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
131 goto err;
132
133 if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
134 alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
135 goto err;
136
137 if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
138 alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
139 goto err;
140
141 if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
142 alloc->req_size == 0) {
143 IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
144 return -EINVAL;
145 }
146
147 trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
148
149 return 0;
150err:
151 IWL_ERR(trans,
152 "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
153 alloc_id, buf_location);
154 return -EINVAL;
155}
156
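/* Validate the time point of a host command TLV and queue the TLV on
 * that time point's hcmd_list.
 */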
157static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
158 const struct iwl_ucode_tlv *tlv)
159{
160 const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
161 u32 tp = le32_to_cpu(hcmd->time_point);
162
163 if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
164 return -EINVAL;
165
166 /* Host commands cannot be sent at the early time point since the FW
167 * is not ready yet
168 */
169 if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
170 tp >= IWL_FW_INI_TIME_POINT_NUM ||
171 tp == IWL_FW_INI_TIME_POINT_EARLY) {
172 IWL_ERR(trans,
173 "WRT: Invalid time point %u for host command TLV\n",
174 tp);
175 return -EINVAL;
176 }
177
178 return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
179}
180
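/* Validate a region TLV and store a copy of it as the active region for
 * its ID, replacing any previously loaded region with the same ID.
 */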
181static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
182 const struct iwl_ucode_tlv *tlv)
183{
184 const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
185 struct iwl_ucode_tlv **active_reg;
186 u32 id = le32_to_cpu(reg->id);
187 u8 type = reg->type;
188 u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
189
190 /*
191 * From version 2 onward, the upper bits of the ID carry the debug policy;
192 * only the 16 LSBs are the region ID, so mask the rest out.
193 */
194 if (le32_to_cpu(reg->hdr.version) >= 2)
195 id &= IWL_FW_INI_REGION_ID_MASK;
196
197 if (le32_to_cpu(tlv->length) < sizeof(*reg))
198 return -EINVAL;
199
200 /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
201 IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
202 IWL_FW_INI_MAX_NAME, reg->name);
203
204 if (id >= IWL_FW_INI_MAX_REGION_ID) {
205 IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
206 return -EINVAL;
207 }
208
209 if (type <= IWL_FW_INI_REGION_INVALID ||
210 type >= IWL_FW_INI_REGION_NUM) {
211 IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
212 return -EINVAL;
213 }
214
215 if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
216 !trans->ops->read_config32) {
217 IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
218 return -EOPNOTSUPP;
219 }
220
221 if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
222 trans->dbg.imr_data.sram_addr =
223 le32_to_cpu(reg->internal_buffer.base_addr);
224 trans->dbg.imr_data.sram_size =
225 le32_to_cpu(reg->internal_buffer.size);
226 }
227
228
229 active_reg = &trans->dbg.active_regions[id];
230 if (*active_reg) {
231 IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
232
233 kfree(*active_reg);
234 }
235
236 *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
237 if (!*active_reg)
238 return -ENOMEM;
239
240 IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);
241
242 return 0;
243}
244
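/* Validate a trigger TLV and queue it on its time point's trig_list.
 * A zero occurrences value means "unlimited", so such a trigger is
 * duplicated with occurrences set to -1 before being queued.
 */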
245static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
246 const struct iwl_ucode_tlv *tlv)
247{
248 const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
249 struct iwl_fw_ini_trigger_tlv *dup_trig;
250 u32 tp = le32_to_cpu(trig->time_point);
251 u32 rf = le32_to_cpu(trig->reset_fw);
252 struct iwl_ucode_tlv *dup = NULL;
253 int ret;
254
255 if (le32_to_cpu(tlv->length) < sizeof(*trig))
256 return -EINVAL;
257
258 if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
259 tp >= IWL_FW_INI_TIME_POINT_NUM) {
260 IWL_ERR(trans,
261 "WRT: Invalid time point %u for trigger TLV\n",
262 tp);
263 return -EINVAL;
264 }
265
266 IWL_DEBUG_FW(trans,
267 "WRT: time point %u for trigger TLV with reset_fw %u\n",
268 tp, rf);
269 trans->dbg.last_tp_resetfw = 0xFF;
270 if (!le32_to_cpu(trig->occurrences)) {
271 dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
272 GFP_KERNEL);
273 if (!dup)
274 return -ENOMEM;
275 dup_trig = (void *)dup->data;
276 dup_trig->occurrences = cpu_to_le32(-1);
277 tlv = dup;
278 }
279
280 ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
281 kfree(dup);
282
283 return ret;
284}
285
286static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
287 const struct iwl_ucode_tlv *tlv)
288{
289 const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
290 u32 tp = le32_to_cpu(conf_set->time_point);
291 u32 type = le32_to_cpu(conf_set->set_type);
292
293 if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
294 tp >= IWL_FW_INI_TIME_POINT_NUM) {
295 IWL_DEBUG_FW(trans,
296 "WRT: Invalid time point %u for config set TLV\n", tp);
297 return -EINVAL;
298 }
299
300 if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID ||
301 type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) {
302 IWL_DEBUG_FW(trans,
303 "WRT: Invalid config set type %u for config set TLV\n", type);
304 return -EINVAL;
305 }
306
307 return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
308}
309
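/* Per-type TLV handlers, indexed by enum iwl_dbg_tlv_type (the TLV type
 * minus IWL_UCODE_TLV_DEBUG_BASE).
 */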
310static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
311 const struct iwl_ucode_tlv *tlv) = {
312 [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
313 [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
314 [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
315 [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
316 [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
317 [IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set,
318};
319
320void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
321 bool ext)
322{
323 enum iwl_ini_cfg_state *cfg_state = ext ?
324 &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
325 const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
326 u32 type;
327 u32 tlv_idx;
328 u32 domain;
329 int ret;
330
331 if (le32_to_cpu(tlv->length) < sizeof(*hdr))
332 return;
333
334 type = le32_to_cpu(tlv->type);
335 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
336 domain = le32_to_cpu(hdr->domain);
337
338 if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
339 !(domain & trans->dbg.domains_bitmap)) {
340 IWL_DEBUG_FW(trans,
341 "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
342 domain, trans->dbg.domains_bitmap);
343 return;
344 }
345
346 if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
347 IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
348 goto out_err;
349 }
350
351 if (!iwl_dbg_tlv_ver_support(tlv)) {
352 IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
353 le32_to_cpu(hdr->version));
354 goto out_err;
355 }
356
357 ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
358 if (ret) {
359 IWL_WARN(trans,
360 "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
361 type, ret, ext);
362 goto out_err;
363 }
364
365 if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
366 *cfg_state = IWL_INI_CFG_STATE_LOADED;
367
368 return;
369
370out_err:
371 *cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
372}
373
374void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
375{
376 struct list_head *timer_list = &trans->dbg.periodic_trig_list;
377 struct iwl_dbg_tlv_timer_node *node, *tmp;
378
379 list_for_each_entry_safe(node, tmp, timer_list, list) {
380 timer_shutdown_sync(&node->timer);
381 list_del(&node->list);
382 kfree(node);
383 }
384}
385IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);
386
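/* Free all DMA-coherent fragments of the DRAM monitor buffer that
 * belongs to @alloc_id and reset its fragment bookkeeping.
 */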
387static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
388 enum iwl_fw_ini_allocation_id alloc_id)
389{
390 struct iwl_fw_mon *fw_mon;
391 int i;
392
393 if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
394 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
395 return;
396
397 fw_mon = &trans->dbg.fw_mon_ini[alloc_id];
398
399 for (i = 0; i < fw_mon->num_frags; i++) {
400 struct iwl_dram_data *frag = &fw_mon->frags[i];
401
402 dma_free_coherent(trans->dev, frag->size, frag->block,
403 frag->physical);
404
405 frag->physical = 0;
406 frag->block = NULL;
407 frag->size = 0;
408 }
409
410 kfree(fw_mon->frags);
411 fw_mon->frags = NULL;
412 fw_mon->num_frags = 0;
413}
414
415void iwl_dbg_tlv_free(struct iwl_trans *trans)
416{
417 struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
418 int i;
419
420 iwl_dbg_tlv_del_timers(trans);
421
422 for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
423 struct iwl_ucode_tlv **active_reg =
424 &trans->dbg.active_regions[i];
425
426 kfree(*active_reg);
427 *active_reg = NULL;
428 }
429
430 list_for_each_entry_safe(tlv_node, tlv_node_tmp,
431 &trans->dbg.debug_info_tlv_list, list) {
432 list_del(&tlv_node->list);
433 kfree(tlv_node);
434 }
435
436 for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
437 struct iwl_dbg_tlv_time_point_data *tp =
438 &trans->dbg.time_point[i];
439
440 list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
441 list) {
442 list_del(&tlv_node->list);
443 kfree(tlv_node);
444 }
445
446 list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
447 list) {
448 list_del(&tlv_node->list);
449 kfree(tlv_node);
450 }
451
452 list_for_each_entry_safe(tlv_node, tlv_node_tmp,
453 &tp->active_trig_list, list) {
454 list_del(&tlv_node->list);
455 kfree(tlv_node);
456 }
457
458 list_for_each_entry_safe(tlv_node, tlv_node_tmp,
459 &tp->config_list, list) {
460 list_del(&tlv_node->list);
461 kfree(tlv_node);
462 }
463
464 }
465
466 for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
467 iwl_dbg_tlv_fragments_free(trans, i);
468}
469
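/* Walk the TLVs of the external debug configuration binary and hand
 * each one to iwl_dbg_tlv_alloc() as an external (ext=true) TLV.
 */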
470static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
471 size_t len)
472{
473 const struct iwl_ucode_tlv *tlv;
474 u32 tlv_len;
475
476 while (len >= sizeof(*tlv)) {
477 len -= sizeof(*tlv);
478 tlv = (const void *)data;
479
480 tlv_len = le32_to_cpu(tlv->length);
481
482 if (len < tlv_len) {
483 IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
484 len, tlv_len);
485 return -EINVAL;
486 }
487 len -= ALIGN(tlv_len, 4);
488 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
489
490 iwl_dbg_tlv_alloc(trans, tlv, true);
491 }
492
493 return 0;
494}
495
496void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
497{
498 const struct firmware *fw;
499 const char *yoyo_bin = "iwl-debug-yoyo.bin";
500 int res;
501
502 if (!iwlwifi_mod_params.enable_ini ||
503 trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
504 return;
505
506 res = firmware_request_nowarn(&fw, yoyo_bin, dev);
507 IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);
508
509 if (res)
510 return;
511
512 trans->dbg.yoyo_bin_loaded = true;
513
514 iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);
515
516 release_firmware(fw);
517}
518
519void iwl_dbg_tlv_init(struct iwl_trans *trans)
520{
521 int i;
522
523 INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
524 INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);
525
526 for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
527 struct iwl_dbg_tlv_time_point_data *tp =
528 &trans->dbg.time_point[i];
529
530 INIT_LIST_HEAD(&tp->trig_list);
531 INIT_LIST_HEAD(&tp->hcmd_list);
532 INIT_LIST_HEAD(&tp->active_trig_list);
533 INIT_LIST_HEAD(&tp->config_list);
534 }
535}
536
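/* Allocate a single DMA-coherent fragment of up to @pages pages,
 * halving the request on failure. Returns the number of pages actually
 * allocated, or a negative error code.
 */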
537static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
538 struct iwl_dram_data *frag, u32 pages)
539{
540 void *block = NULL;
541 dma_addr_t physical;
542
543 if (!frag || frag->size || !pages)
544 return -EIO;
545
546 /*
547 * We try to allocate as many pages as we can, starting with
548 * the requested amount and going down until we can allocate
549 * something. Because of DIV_ROUND_UP(), pages will never go
550 * down to 0 and stop the loop, so stop when pages reaches 1,
551 * which is too small anyway.
552 */
553 while (pages > 1) {
554 block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
555 &physical,
556 GFP_KERNEL | __GFP_NOWARN);
557 if (block)
558 break;
559
560 IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
561 pages * PAGE_SIZE);
562
563 pages = DIV_ROUND_UP(pages, 2);
564 }
565
566 if (!block)
567 return -ENOMEM;
568
569 frag->physical = physical;
570 frag->block = block;
571 frag->size = pages * PAGE_SIZE;
572
573 return pages;
574}
575
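/* Allocate the DRAM fragments for @alloc_id according to its buffer
 * allocation TLV, splitting the requested size across at most
 * BUF_ALLOC_MAX_NUM_FRAGS fragments; if fragments already exist they
 * are only cleared.
 */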
576static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
577 enum iwl_fw_ini_allocation_id alloc_id)
578{
579 struct iwl_fw_mon *fw_mon;
580 struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
581 u32 num_frags, remain_pages, frag_pages;
582 int i;
583
584 if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
585 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
586 return -EIO;
587
588 fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
589 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
590
591 if (fw_mon->num_frags) {
592 for (i = 0; i < fw_mon->num_frags; i++)
593 memset(fw_mon->frags[i].block, 0,
594 fw_mon->frags[i].size);
595 return 0;
596 }
597
598 if (fw_mon_cfg->buf_location !=
599 cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
600 return 0;
601
602 num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
603 if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
604 if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
605 return -EIO;
606 num_frags = 1;
607 } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
608 alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
609 return -EIO;
610 }
611
612 remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
613 PAGE_SIZE);
614 num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
615 num_frags = min_t(u32, num_frags, remain_pages);
616 frag_pages = DIV_ROUND_UP(remain_pages, num_frags);
617
618 fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
619 if (!fw_mon->frags)
620 return -ENOMEM;
621
622 for (i = 0; i < num_frags; i++) {
623 int pages = min_t(u32, frag_pages, remain_pages);
624
625 IWL_DEBUG_FW(fwrt,
626 "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
627 alloc_id, i, pages * PAGE_SIZE);
628
629 pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
630 pages);
631 if (pages < 0) {
632 u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
633 (remain_pages * PAGE_SIZE);
634
635 if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
636 iwl_dbg_tlv_fragments_free(fwrt->trans,
637 alloc_id);
638 return pages;
639 }
640 break;
641 }
642
643 remain_pages -= pages;
644 fw_mon->num_frags++;
645 }
646
647 return 0;
648}
649
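/* Describe the DRAM fragments of @alloc_id to the firmware by sending
 * one or more BUFFER_ALLOCATION host commands, when the firmware
 * supports that command.
 */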
650static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
651 enum iwl_fw_ini_allocation_id alloc_id)
652{
653 struct iwl_fw_mon *fw_mon;
654 u32 remain_frags, num_commands;
655 int i, fw_mon_idx = 0;
656
657 if (!fw_has_capa(&fwrt->fw->ucode_capa,
658 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
659 return 0;
660
661 if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
662 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
663 return -EIO;
664
665 if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
666 IWL_FW_INI_LOCATION_DRAM_PATH)
667 return 0;
668
669 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
670
671 /* the first fragment of DBGC1 is given to the FW via register
672 * or context info
673 */
674 if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
675 fw_mon_idx++;
676
677 remain_frags = fw_mon->num_frags - fw_mon_idx;
678 if (!remain_frags)
679 return 0;
680
681 num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
682
683 IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
684 alloc_id);
685
686 for (i = 0; i < num_commands; i++) {
687 u32 num_frags = min_t(u32, remain_frags,
688 BUF_ALLOC_MAX_NUM_FRAGS);
689 struct iwl_buf_alloc_cmd data = {
690 .alloc_id = cpu_to_le32(alloc_id),
691 .num_frags = cpu_to_le32(num_frags),
692 .buf_location =
693 cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
694 };
695 struct iwl_host_cmd hcmd = {
696 .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
697 .data[0] = &data,
698 .len[0] = sizeof(data),
699 .flags = CMD_SEND_IN_RFKILL,
700 };
701 int ret, j;
702
703 for (j = 0; j < num_frags; j++) {
704 struct iwl_buf_alloc_frag *frag = &data.frags[j];
705 struct iwl_dram_data *fw_mon_frag =
706 &fw_mon->frags[fw_mon_idx++];
707
708 frag->addr = cpu_to_le64(fw_mon_frag->physical);
709 frag->size = cpu_to_le32(fw_mon_frag->size);
710 }
711 ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
712 if (ret)
713 return ret;
714
715 remain_frags -= num_frags;
716 }
717
718 return 0;
719}
720
721static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
722{
723 int ret, i;
724
725 if (fw_has_capa(&fwrt->fw->ucode_capa,
726 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
727 return;
728
729 for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
730 ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
731 if (ret)
732 IWL_WARN(fwrt,
733 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
734 i, ret);
735 }
736}
737
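/* Fill one iwl_dram_info entry with the addresses and sizes of
 * @alloc_id's DRAM fragments; used by iwl_dbg_tlv_update_drams() when
 * the firmware advertises IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT.
 */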
738static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
739 enum iwl_fw_ini_allocation_id alloc_id,
740 struct iwl_dram_info *dram_info)
741{
742 struct iwl_fw_mon *fw_mon;
743 u32 remain_frags, num_frags;
744 int j, fw_mon_idx = 0;
745 struct iwl_buf_alloc_cmd *data;
746
747 if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
748 IWL_FW_INI_LOCATION_DRAM_PATH) {
749 IWL_DEBUG_FW(fwrt, "WRT: alloc_id %u location is not in DRAM_PATH\n",
750 alloc_id);
751 return -1;
752 }
753
754 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
755
756 /* the first fragment of DBGC1 is given to the FW via register
757 * or context info
758 */
759 if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
760 fw_mon_idx++;
761
762 remain_frags = fw_mon->num_frags - fw_mon_idx;
763 if (!remain_frags)
764 return -1;
765
766 num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
767 data = &dram_info->dram_frags[alloc_id - 1];
768 data->alloc_id = cpu_to_le32(alloc_id);
769 data->num_frags = cpu_to_le32(num_frags);
770 data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH);
771
772 IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n",
773 cpu_to_le32(alloc_id), cpu_to_le32(num_frags));
774
775 for (j = 0; j < num_frags; j++) {
776 struct iwl_buf_alloc_frag *frag = &data->frags[j];
777 struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++];
778
779 frag->addr = cpu_to_le64(fw_mon_frag->physical);
780 frag->size = cpu_to_le32(fw_mon_frag->size);
781 IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n");
782 IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n",
783 j, cpu_to_le64(fw_mon_frag->physical),
784 cpu_to_le32(fw_mon_frag->size));
785 }
786 return 0;
787}
788
789static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
790{
791 int ret, i;
792 bool dram_alloc = false;
793 struct iwl_dram_data *frags =
794 &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
795 struct iwl_dram_info *dram_info;
796
797 if (!frags || !frags->block)
798 return;
799
800 dram_info = frags->block;
801
802 if (!fw_has_capa(&fwrt->fw->ucode_capa,
803 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
804 return;
805
806 memset(dram_info, 0, sizeof(*dram_info));
807
808 for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
809 i < IWL_FW_INI_ALLOCATION_NUM; i++) {
810 if (fwrt->trans->dbg.fw_mon_cfg[i].buf_location ==
811 IWL_FW_INI_LOCATION_INVALID)
812 continue;
813
814 ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
815 if (!ret)
816 dram_alloc = true;
817 else
818 IWL_INFO(fwrt,
819 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
820 i, ret);
821 }
822
823 if (dram_alloc) {
824 dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
825 dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
826 }
827}
828
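/* Send every host command queued on @hcmd_list to the firmware. */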
829static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
830 struct list_head *hcmd_list)
831{
832 struct iwl_dbg_tlv_node *node;
833
834 list_for_each_entry(node, hcmd_list, list) {
835 struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
836 struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
837 u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
838 struct iwl_host_cmd cmd = {
839 .id = WIDE_ID(hcmd_data->group, hcmd_data->id),
840 .len = { hcmd_len, },
841 .data = { hcmd_data->data, },
842 };
843
844 iwl_trans_send_cmd(fwrt->trans, &cmd);
845 }
846}
847
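/* Apply every config set TLV on @conf_list: periphery, device memory
 * and CSR writes, DBGC1 DRAM address programming, or HWM debug token
 * selection, depending on the set type.
 */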
848static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
849 struct list_head *conf_list)
850{
851 struct iwl_dbg_tlv_node *node;
852
853 list_for_each_entry(node, conf_list, list) {
854 struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
855 u32 count, address, value;
856 u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
857 u32 type = le32_to_cpu(config_list->set_type);
858 u32 offset = le32_to_cpu(config_list->addr_offset);
859
860 switch (type) {
861 case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: {
862 if (!iwl_trans_grab_nic_access(fwrt->trans)) {
863 IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n");
864 IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n");
865 continue;
866 }
867 IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config, len %u\n", len);
868 for (count = 0; count < len; count++) {
869 address = le32_to_cpu(config_list->addr_val[count].address);
870 value = le32_to_cpu(config_list->addr_val[count].value);
871 iwl_trans_write_prph(fwrt->trans, address + offset, value);
872 }
873 iwl_trans_release_nic_access(fwrt->trans);
874 break;
875 }
876 case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: {
877 for (count = 0; count < len; count++) {
878 address = le32_to_cpu(config_list->addr_val[count].address);
879 value = le32_to_cpu(config_list->addr_val[count].value);
880 iwl_trans_write_mem32(fwrt->trans, address + offset, value);
881 IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n",
882 count, address, value);
883 }
884 break;
885 }
886 case IWL_FW_INI_CONFIG_SET_TYPE_CSR: {
887 for (count = 0; count < len; count++) {
888 address = le32_to_cpu(config_list->addr_val[count].address);
889 value = le32_to_cpu(config_list->addr_val[count].value);
890 iwl_write32(fwrt->trans, address + offset, value);
891 IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n",
892 count, address, value);
893 }
894 break;
895 }
896 case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
897 struct iwl_dbgc1_info dram_info = {};
898 struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
899 __le64 dram_base_addr;
900 __le32 dram_size;
901 u64 dram_addr;
902 u32 ret;
903
904 if (!frags)
905 break;
906
907 dram_base_addr = cpu_to_le64(frags->physical);
908 dram_size = cpu_to_le32(frags->size);
909 dram_addr = le64_to_cpu(dram_base_addr);
910
911 IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
912 dram_base_addr, dram_size);
913 IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
914 le32_to_cpu(config_list->addr_offset));
915 for (count = 0; count < len; count++) {
916 address = le32_to_cpu(config_list->addr_val[count].address);
917 dram_info.dbgc1_add_lsb =
918 cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400);
919 dram_info.dbgc1_add_msb =
920 cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32);
921 dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400);
922 ret = iwl_trans_write_mem(fwrt->trans,
923 address + offset, &dram_info, 4);
924 if (ret) {
925 IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n");
926 break;
927 }
928 }
929 break;
930 }
931 case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: {
932 u32 debug_token_config =
933 le32_to_cpu(config_list->addr_val[0].value);
934
935 IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n",
936 debug_token_config);
937 fwrt->trans->dbg.ucode_preset = debug_token_config;
938 break;
939 }
940 default:
941 break;
942 }
943 }
944}
945
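/* Timer callback for periodic triggers: collect a debug dump and, if
 * occurrences remain, re-arm the timer with the configured interval.
 */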
946static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
947{
948 struct iwl_dbg_tlv_timer_node *timer_node =
949 from_timer(timer_node, t, timer);
950 struct iwl_fwrt_dump_data dump_data = {
951 .trig = (void *)timer_node->tlv->data,
952 };
953 int ret;
954
955 ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
956 if (!ret || ret == -EBUSY) {
957 u32 occur = le32_to_cpu(dump_data.trig->occurrences);
958 u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
959
960 if (!occur)
961 return;
962
963 mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
964 }
965}
966
967static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
968{
969 struct iwl_dbg_tlv_node *node;
970 struct list_head *trig_list =
971 &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;
972
973 list_for_each_entry(node, trig_list, list) {
974 struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
975 struct iwl_dbg_tlv_timer_node *timer_node;
976 u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
977 u32 min_interval = 100;
978
979 if (!occur)
980 continue;
981
982 /* make sure there is at least one dword of data for the
983 * interval value
984 */
985 if (le32_to_cpu(node->tlv.length) <
986 sizeof(*trig) + sizeof(__le32)) {
987 IWL_ERR(fwrt,
988 "WRT: Invalid periodic trigger data was not given\n");
989 continue;
990 }
991
992 if (le32_to_cpu(trig->data[0]) < min_interval) {
993 IWL_WARN(fwrt,
994 "WRT: Override min interval from %u to %u msec\n",
995 le32_to_cpu(trig->data[0]), min_interval);
996 trig->data[0] = cpu_to_le32(min_interval);
997 }
998
999 collect_interval = le32_to_cpu(trig->data[0]);
1000
1001 timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
1002 if (!timer_node) {
1003 IWL_ERR(fwrt,
1004 "WRT: Failed to allocate periodic trigger\n");
1005 continue;
1006 }
1007
1008 timer_node->fwrt = fwrt;
1009 timer_node->tlv = &node->tlv;
1010 timer_setup(&timer_node->timer,
1011 iwl_dbg_tlv_periodic_trig_handler, 0);
1012
1013 list_add_tail(&timer_node->list,
1014 &fwrt->trans->dbg.periodic_trig_list);
1015
1016 IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");
1017
1018 mod_timer(&timer_node->timer,
1019 jiffies + msecs_to_jiffies(collect_interval));
1020 }
1021}
1022
1023static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
1024 const struct iwl_ucode_tlv *old)
1025{
1026 const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
1027 const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
1028 const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
1029 u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
1030 u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
1031 int i, j;
1032
1033 for (i = 0; i < new_dwords_num; i++) {
1034 bool match = false;
1035
1036 for (j = 0; j < old_dwords_num; j++) {
1037 if (new_data[i] == old_data[j]) {
1038 match = true;
1039 break;
1040 }
1041 }
1042 if (!match)
1043 return false;
1044 }
1045
1046 return true;
1047}
1048
1049static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
1050 struct iwl_ucode_tlv *trig_tlv,
1051 struct iwl_dbg_tlv_node *node)
1052{
1053 struct iwl_ucode_tlv *node_tlv = &node->tlv;
1054 struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
1055 struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
1056 u32 policy = le32_to_cpu(trig->apply_policy);
1057 u32 size = le32_to_cpu(trig_tlv->length);
1058 u32 trig_data_len = size - sizeof(*trig);
1059 u32 offset = 0;
1060
1061 if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
1062 u32 data_len = le32_to_cpu(node_tlv->length) -
1063 sizeof(*node_trig);
1064
1065 IWL_DEBUG_FW(fwrt,
1066 "WRT: Appending trigger data (time point %u)\n",
1067 le32_to_cpu(trig->time_point));
1068
1069 offset += data_len;
1070 size += data_len;
1071 } else {
1072 IWL_DEBUG_FW(fwrt,
1073 "WRT: Overriding trigger data (time point %u)\n",
1074 le32_to_cpu(trig->time_point));
1075 }
1076
1077 if (size != le32_to_cpu(node_tlv->length)) {
1078 struct list_head *prev = node->list.prev;
1079 struct iwl_dbg_tlv_node *tmp;
1080
1081 list_del(&node->list);
1082
1083 tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
1084 if (!tmp) {
1085 IWL_WARN(fwrt,
1086 "WRT: No memory to override trigger (time point %u)\n",
1087 le32_to_cpu(trig->time_point));
1088
1089 list_add(&node->list, prev);
1090
1091 return -ENOMEM;
1092 }
1093
1094 list_add(&tmp->list, prev);
1095 node_tlv = &tmp->tlv;
1096 node_trig = (void *)node_tlv->data;
1097 }
1098
1099 memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
1100 node_tlv->length = cpu_to_le32(size);
1101
1102 if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
1103 IWL_DEBUG_FW(fwrt,
1104 "WRT: Overriding trigger configuration (time point %u)\n",
1105 le32_to_cpu(trig->time_point));
1106
1107 /* the first 11 dwords are configuration related */
1108 memcpy(node_trig, trig, sizeof(__le32) * 11);
1109 }
1110
1111 if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
1112 IWL_DEBUG_FW(fwrt,
1113 "WRT: Overriding trigger regions (time point %u)\n",
1114 le32_to_cpu(trig->time_point));
1115
1116 node_trig->regions_mask = trig->regions_mask;
1117 } else {
1118 IWL_DEBUG_FW(fwrt,
1119 "WRT: Appending trigger regions (time point %u)\n",
1120 le32_to_cpu(trig->time_point));
1121
1122 node_trig->regions_mask |= trig->regions_mask;
1123 }
1124
1125 return 0;
1126}
1127
1128static int
1129iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
1130 struct list_head *trig_list,
1131 struct iwl_ucode_tlv *trig_tlv)
1132{
1133 struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
1134 struct iwl_dbg_tlv_node *node, *match = NULL;
1135 u32 policy = le32_to_cpu(trig->apply_policy);
1136
1137 list_for_each_entry(node, trig_list, list) {
1138 if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
1139 break;
1140
1141 if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
1142 is_trig_data_contained(trig_tlv, &node->tlv)) {
1143 match = node;
1144 break;
1145 }
1146 }
1147
1148 if (!match) {
1149 IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
1150 le32_to_cpu(trig->time_point));
1151 return iwl_dbg_tlv_add(trig_tlv, trig_list);
1152 }
1153
1154 return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
1155}
1156
1157static void
1158iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
1159 struct iwl_dbg_tlv_time_point_data *tp)
1160{
1161 struct iwl_dbg_tlv_node *node;
1162 struct list_head *trig_list = &tp->trig_list;
1163 struct list_head *active_trig_list = &tp->active_trig_list;
1164
1165 list_for_each_entry(node, trig_list, list) {
1166 struct iwl_ucode_tlv *tlv = &node->tlv;
1167
1168 iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
1169 }
1170}
1171
1172static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
1173 struct iwl_fwrt_dump_data *dump_data,
1174 union iwl_dbg_tlv_tp_data *tp_data,
1175 u32 trig_data)
1176{
1177 struct iwl_rx_packet *pkt = tp_data->fw_pkt;
1178 struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
1179
1180 if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
1181 pkt->hdr.group_id == wanted_hdr->group_id)) {
1182 struct iwl_rx_packet *fw_pkt =
1183 kmemdup(pkt,
1184 sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
1185 GFP_ATOMIC);
1186
1187 if (!fw_pkt)
1188 return false;
1189
1190 dump_data->fw_pkt = fw_pkt;
1191
1192 return true;
1193 }
1194
1195 return false;
1196}
1197
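/* Run all active triggers of a time point: collect a dump for each
 * trigger that carries no data or whose data passes @data_check, then
 * decide from the trigger's reset_fw mode whether the firmware must be
 * restarted.
 */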
1198static int
1199iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
1200 struct list_head *active_trig_list,
1201 union iwl_dbg_tlv_tp_data *tp_data,
1202 bool (*data_check)(struct iwl_fw_runtime *fwrt,
1203 struct iwl_fwrt_dump_data *dump_data,
1204 union iwl_dbg_tlv_tp_data *tp_data,
1205 u32 trig_data))
1206{
1207 struct iwl_dbg_tlv_node *node;
1208
1209 list_for_each_entry(node, active_trig_list, list) {
1210 struct iwl_fwrt_dump_data dump_data = {
1211 .trig = (void *)node->tlv.data,
1212 };
1213 u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
1214 data);
1215 int ret, i;
1216 u32 tp = le32_to_cpu(dump_data.trig->time_point);
1217
1219 if (!num_data) {
1220 ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
1221 if (ret)
1222 return ret;
1223 }
1224
1225 for (i = 0; i < num_data; i++) {
1226 if (!data_check ||
1227 data_check(fwrt, &dump_data, tp_data,
1228 le32_to_cpu(dump_data.trig->data[i]))) {
1229 ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
1230 if (ret)
1231 return ret;
1232
1233 break;
1234 }
1235 }
1236
1237 fwrt->trans->dbg.restart_required = false;
1238 IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
1239 tp, dump_data.trig->reset_fw);
1240 IWL_DEBUG_FW(fwrt,
1241 "WRT: restart_required %d, last_tp_resetfw %d\n",
1242 fwrt->trans->dbg.restart_required,
1243 fwrt->trans->dbg.last_tp_resetfw);
1244
1245 if (fwrt->trans->trans_cfg->device_family ==
1246 IWL_DEVICE_FAMILY_9000) {
1247 fwrt->trans->dbg.restart_required = true;
1248 } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
1249 fwrt->trans->dbg.last_tp_resetfw ==
1250 IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
1251 fwrt->trans->dbg.restart_required = false;
1252 fwrt->trans->dbg.last_tp_resetfw = 0xFF;
1253 IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
1254 } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
1255 IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
1256 IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
1257 fwrt->trans->dbg.restart_required = true;
1258 } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
1259 IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
1260 IWL_DEBUG_FW(fwrt,
1261 "WRT: stop only and no reload firmware\n");
1262 fwrt->trans->dbg.restart_required = false;
1263 fwrt->trans->dbg.last_tp_resetfw =
1264 le32_to_cpu(dump_data.trig->reset_fw);
1265 } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
1266 IWL_FW_INI_RESET_FW_MODE_NOTHING) {
1267 IWL_DEBUG_FW(fwrt,
1268 "WRT: nothing need to be done after debug collection\n");
1269 } else {
1270 IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
1271 le32_to_cpu(dump_data.trig->reset_fw));
1272 }
1273 }
1274 return 0;
1275}
1276
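/* If no debug destination was chosen yet, build the active trigger
 * lists; then (re)allocate the DRAM fragments for every configured
 * allocation and disable regions whose DRAM allocation failed.
 */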
1277void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
1278{
1279 enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
1280 int ret, i;
1281 u32 failed_alloc = 0;
1282
1283 if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) {
1284 IWL_DEBUG_FW(fwrt,
1285 "WRT: Generating active triggers list, domain 0x%x\n",
1286 fwrt->trans->dbg.domains_bitmap);
1287
1288 for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
1289 struct iwl_dbg_tlv_time_point_data *tp =
1290 &fwrt->trans->dbg.time_point[i];
1291
1292 iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
1293 }
1294 } else if (*ini_dest != IWL_FW_INI_LOCATION_DRAM_PATH) {
1295 /* For DRAM, go through the loop below to clear all the buffers
1296 * properly on restart, otherwise garbage may be left there and
1297 * leak into new debug dumps.
1298 */
1299 return;
1300 }
1301
1302 *ini_dest = IWL_FW_INI_LOCATION_INVALID;
1303 for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
1304 struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
1305 &fwrt->trans->dbg.fw_mon_cfg[i];
1306 u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
1307
1308 if (dest == IWL_FW_INI_LOCATION_INVALID) {
1309 failed_alloc |= BIT(i);
1310 continue;
1311 }
1312
1313 if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
1314 *ini_dest = dest;
1315
1316 if (dest != *ini_dest)
1317 continue;
1318
1319 ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
1320
1321 if (ret) {
1322 IWL_WARN(fwrt,
1323 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
1324 i, ret);
1325 failed_alloc |= BIT(i);
1326 }
1327 }
1328
1329 if (!failed_alloc)
1330 return;
1331
1332 for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
1333 struct iwl_fw_ini_region_tlv *reg;
1334 struct iwl_ucode_tlv **active_reg =
1335 &fwrt->trans->dbg.active_regions[i];
1336 u32 reg_type;
1337
1338 if (!*active_reg) {
1339 fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
1340 continue;
1341 }
1342
1343 reg = (void *)(*active_reg)->data;
1344 reg_type = reg->type;
1345
1346 if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
1347 !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
1348 continue;
1349
1350 IWL_DEBUG_FW(fwrt,
1351 "WRT: removing allocation id %d from region id %d\n",
1352 le32_to_cpu(reg->dram_alloc_id), i);
1353
1354 failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
1355 fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
1356
1357 kfree(*active_reg);
1358 *active_reg = NULL;
1359 }
1360}
1361
1362void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
1363 enum iwl_fw_ini_time_point tp_id,
1364 union iwl_dbg_tlv_tp_data *tp_data,
1365 bool sync)
1366{
1367 struct list_head *hcmd_list, *trig_list, *conf_list;
1368
1369 if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
1370 tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
1371 tp_id >= IWL_FW_INI_TIME_POINT_NUM)
1372 return;
1373
1374 hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
1375 trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
1376 conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list;
1377
1378 switch (tp_id) {
1379 case IWL_FW_INI_TIME_POINT_EARLY:
1380 iwl_dbg_tlv_init_cfg(fwrt);
1381 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1382 iwl_dbg_tlv_update_drams(fwrt);
1383 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
1384 break;
1385 case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
1386 iwl_dbg_tlv_apply_buffers(fwrt);
1387 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1388 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1389 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
1390 break;
1391 case IWL_FW_INI_TIME_POINT_PERIODIC:
1392 iwl_dbg_tlv_set_periodic_trigs(fwrt);
1393 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1394 break;
1395 case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
1396 case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
1397 case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
1398 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1399 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1400 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
1401 iwl_dbg_tlv_check_fw_pkt);
1402 break;
1403 default:
1404 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1405 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1406 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
1407 break;
1408 }
1409}
1410IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2018-2022 Intel Corporation
4 */
5#include <linux/firmware.h>
6#include "iwl-drv.h"
7#include "iwl-trans.h"
8#include "iwl-dbg-tlv.h"
9#include "fw/dbg.h"
10#include "fw/runtime.h"
11
12/**
13 * enum iwl_dbg_tlv_type - debug TLV types
14 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
15 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
16 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
17 * @IWL_DBG_TLV_TYPE_REGION: region TLV
18 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
19 * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV
20 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
21 */
22enum iwl_dbg_tlv_type {
23 IWL_DBG_TLV_TYPE_DEBUG_INFO =
24 IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
25 IWL_DBG_TLV_TYPE_BUF_ALLOC,
26 IWL_DBG_TLV_TYPE_HCMD,
27 IWL_DBG_TLV_TYPE_REGION,
28 IWL_DBG_TLV_TYPE_TRIGGER,
29 IWL_DBG_TLV_TYPE_CONF_SET,
30 IWL_DBG_TLV_TYPE_NUM,
31};
32
33/**
34 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
35 * @min_ver: min version supported
36 * @max_ver: max version supported
37 */
38struct iwl_dbg_tlv_ver_data {
39 int min_ver;
40 int max_ver;
41};
42
43/**
44 * struct iwl_dbg_tlv_timer_node - timer node struct
45 * @list: list of &struct iwl_dbg_tlv_timer_node
46 * @timer: timer
47 * @fwrt: &struct iwl_fw_runtime
48 * @tlv: TLV attach to the timer node
49 */
50struct iwl_dbg_tlv_timer_node {
51 struct list_head list;
52 struct timer_list timer;
53 struct iwl_fw_runtime *fwrt;
54 struct iwl_ucode_tlv *tlv;
55};
56
57static const struct iwl_dbg_tlv_ver_data
58dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
59 [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
60 [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
61 [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
62 [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,},
63 [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
64 [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
65};
66
67static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
68 struct list_head *list)
69{
70 u32 len = le32_to_cpu(tlv->length);
71 struct iwl_dbg_tlv_node *node;
72
73 node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
74 if (!node)
75 return -ENOMEM;
76
77 memcpy(&node->tlv, tlv, sizeof(node->tlv));
78 memcpy(node->tlv.data, tlv->data, len);
79 list_add_tail(&node->list, list);
80
81 return 0;
82}
83
84static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
85{
86 const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
87 u32 type = le32_to_cpu(tlv->type);
88 u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
89 u32 ver = le32_to_cpu(hdr->version);
90
91 if (ver < dbg_ver_table[tlv_idx].min_ver ||
92 ver > dbg_ver_table[tlv_idx].max_ver)
93 return false;
94
95 return true;
96}
97
98static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
99 const struct iwl_ucode_tlv *tlv)
100{
101 const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;
102
103 if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
104 return -EINVAL;
105
106 IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
107 debug_info->debug_cfg_name);
108
109 return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
110}
111
112static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
113 const struct iwl_ucode_tlv *tlv)
114{
115 const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
116 u32 buf_location;
117 u32 alloc_id;
118
119 if (le32_to_cpu(tlv->length) != sizeof(*alloc))
120 return -EINVAL;
121
122 buf_location = le32_to_cpu(alloc->buf_location);
123 alloc_id = le32_to_cpu(alloc->alloc_id);
124
125 if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
126 buf_location >= IWL_FW_INI_LOCATION_NUM)
127 goto err;
128
129 if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
130 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
131 goto err;
132
133 if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
134 alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
135 goto err;
136
137 if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
138 alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
139 goto err;
140
141 trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
142
143 return 0;
144err:
145 IWL_ERR(trans,
146 "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
147 alloc_id, buf_location);
148 return -EINVAL;
149}
150
151static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
152 const struct iwl_ucode_tlv *tlv)
153{
154 const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
155 u32 tp = le32_to_cpu(hcmd->time_point);
156
157 if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
158 return -EINVAL;
159
160 /* Host commands can not be sent in early time point since the FW
161 * is not ready
162 */
163 if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
164 tp >= IWL_FW_INI_TIME_POINT_NUM ||
165 tp == IWL_FW_INI_TIME_POINT_EARLY) {
166 IWL_ERR(trans,
167 "WRT: Invalid time point %u for host command TLV\n",
168 tp);
169 return -EINVAL;
170 }
171
172 return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
173}
174
175static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
176 const struct iwl_ucode_tlv *tlv)
177{
178 const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
179 struct iwl_ucode_tlv **active_reg;
180 u32 id = le32_to_cpu(reg->id);
181 u8 type = reg->type;
182 u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
183
184 /*
185 * The higher part of the ID from version 2 is debug policy.
186 * The id will be only lsb 16 bits, so mask it out.
187 */
188 if (le32_to_cpu(reg->hdr.version) >= 2)
189 id &= IWL_FW_INI_REGION_ID_MASK;
190
191 if (le32_to_cpu(tlv->length) < sizeof(*reg))
192 return -EINVAL;
193
194 /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
195 IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
196 IWL_FW_INI_MAX_NAME, reg->name);
197
198 if (id >= IWL_FW_INI_MAX_REGION_ID) {
199 IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
200 return -EINVAL;
201 }
202
203 if (type <= IWL_FW_INI_REGION_INVALID ||
204 type >= IWL_FW_INI_REGION_NUM) {
205 IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
206 return -EINVAL;
207 }
208
209 if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
210 !trans->ops->read_config32) {
211 IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
212 return -EOPNOTSUPP;
213 }
214
215 if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
216 trans->dbg.imr_data.sram_addr =
217 le32_to_cpu(reg->internal_buffer.base_addr);
218 trans->dbg.imr_data.sram_size =
219 le32_to_cpu(reg->internal_buffer.size);
220 }
221
222
223 active_reg = &trans->dbg.active_regions[id];
224 if (*active_reg) {
225 IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
226
227 kfree(*active_reg);
228 }
229
230 *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
231 if (!*active_reg)
232 return -ENOMEM;
233
234 IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);
235
236 return 0;
237}
238
239static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
240 const struct iwl_ucode_tlv *tlv)
241{
242 const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
243 struct iwl_fw_ini_trigger_tlv *dup_trig;
244 u32 tp = le32_to_cpu(trig->time_point);
245 u32 rf = le32_to_cpu(trig->reset_fw);
246 struct iwl_ucode_tlv *dup = NULL;
247 int ret;
248
249 if (le32_to_cpu(tlv->length) < sizeof(*trig))
250 return -EINVAL;
251
252 if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
253 tp >= IWL_FW_INI_TIME_POINT_NUM) {
254 IWL_ERR(trans,
255 "WRT: Invalid time point %u for trigger TLV\n",
256 tp);
257 return -EINVAL;
258 }
259
260 IWL_DEBUG_FW(trans,
261 "WRT: time point %u for trigger TLV with reset_fw %u\n",
262 tp, rf);
263 trans->dbg.last_tp_resetfw = 0xFF;
264 if (!le32_to_cpu(trig->occurrences)) {
265 dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
266 GFP_KERNEL);
267 if (!dup)
268 return -ENOMEM;
269 dup_trig = (void *)dup->data;
270 dup_trig->occurrences = cpu_to_le32(-1);
271 tlv = dup;
272 }
273
274 ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
275 kfree(dup);
276
277 return ret;
278}
279
280static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
281 const struct iwl_ucode_tlv *tlv)
282{
283 const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
284 u32 tp = le32_to_cpu(conf_set->time_point);
285 u32 type = le32_to_cpu(conf_set->set_type);
286
287 if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
288 tp >= IWL_FW_INI_TIME_POINT_NUM) {
289 IWL_DEBUG_FW(trans,
290 "WRT: Invalid time point %u for config set TLV\n", tp);
291 return -EINVAL;
292 }
293
294 if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID ||
295 type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) {
296 IWL_DEBUG_FW(trans,
297 "WRT: Invalid config set type %u for config set TLV\n", type);
298 return -EINVAL;
299 }
300
301 return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
302}
303
304static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
305 const struct iwl_ucode_tlv *tlv) = {
306 [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
307 [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
308 [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
309 [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
310 [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
311 [IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set,
312};
313
314void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
315 bool ext)
316{
317 enum iwl_ini_cfg_state *cfg_state = ext ?
318 &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
319 const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
320 u32 type;
321 u32 tlv_idx;
322 u32 domain;
323 int ret;
324
325 if (le32_to_cpu(tlv->length) < sizeof(*hdr))
326 return;
327
328 type = le32_to_cpu(tlv->type);
329 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
330 domain = le32_to_cpu(hdr->domain);
331
332 if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
333 !(domain & trans->dbg.domains_bitmap)) {
334 IWL_DEBUG_FW(trans,
335 "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
336 domain, trans->dbg.domains_bitmap);
337 return;
338 }
339
340 if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
341 IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
342 goto out_err;
343 }
344
345 if (!iwl_dbg_tlv_ver_support(tlv)) {
346 IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
347 le32_to_cpu(hdr->version));
348 goto out_err;
349 }
350
351 ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
352 if (ret) {
353 IWL_ERR(trans,
354 "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
355 type, ret, ext);
356 goto out_err;
357 }
358
359 if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
360 *cfg_state = IWL_INI_CFG_STATE_LOADED;
361
362 return;
363
364out_err:
365 *cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
366}
367
368void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
369{
370 struct list_head *timer_list = &trans->dbg.periodic_trig_list;
371 struct iwl_dbg_tlv_timer_node *node, *tmp;
372
373 list_for_each_entry_safe(node, tmp, timer_list, list) {
374 timer_shutdown_sync(&node->timer);
375 list_del(&node->list);
376 kfree(node);
377 }
378}
379IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);
380
381static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
382 enum iwl_fw_ini_allocation_id alloc_id)
383{
384 struct iwl_fw_mon *fw_mon;
385 int i;
386
387 if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
388 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
389 return;
390
391 fw_mon = &trans->dbg.fw_mon_ini[alloc_id];
392
393 for (i = 0; i < fw_mon->num_frags; i++) {
394 struct iwl_dram_data *frag = &fw_mon->frags[i];
395
396 dma_free_coherent(trans->dev, frag->size, frag->block,
397 frag->physical);
398
399 frag->physical = 0;
400 frag->block = NULL;
401 frag->size = 0;
402 }
403
404 kfree(fw_mon->frags);
405 fw_mon->frags = NULL;
406 fw_mon->num_frags = 0;
407}
408
409void iwl_dbg_tlv_free(struct iwl_trans *trans)
410{
411 struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
412 int i;
413
414 iwl_dbg_tlv_del_timers(trans);
415
416 for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
417 struct iwl_ucode_tlv **active_reg =
418 &trans->dbg.active_regions[i];
419
420 kfree(*active_reg);
421 *active_reg = NULL;
422 }
423
424 list_for_each_entry_safe(tlv_node, tlv_node_tmp,
425 &trans->dbg.debug_info_tlv_list, list) {
426 list_del(&tlv_node->list);
427 kfree(tlv_node);
428 }
429
430 for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
431 struct iwl_dbg_tlv_time_point_data *tp =
432 &trans->dbg.time_point[i];
433
434 list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
435 list) {
436 list_del(&tlv_node->list);
437 kfree(tlv_node);
438 }
439
440 list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
441 list) {
442 list_del(&tlv_node->list);
443 kfree(tlv_node);
444 }
445
446 list_for_each_entry_safe(tlv_node, tlv_node_tmp,
447 &tp->active_trig_list, list) {
448 list_del(&tlv_node->list);
449 kfree(tlv_node);
450 }
451
452 list_for_each_entry_safe(tlv_node, tlv_node_tmp,
453 &tp->config_list, list) {
454 list_del(&tlv_node->list);
455 kfree(tlv_node);
456 }
457
458 }
459
460 for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
461 iwl_dbg_tlv_fragments_free(trans, i);
462}
463
464static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
465 size_t len)
466{
467 const struct iwl_ucode_tlv *tlv;
468 u32 tlv_len;
469
470 while (len >= sizeof(*tlv)) {
471 len -= sizeof(*tlv);
472 tlv = (const void *)data;
473
474 tlv_len = le32_to_cpu(tlv->length);
475
476 if (len < tlv_len) {
477 IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
478 len, tlv_len);
479 return -EINVAL;
480 }
481 len -= ALIGN(tlv_len, 4);
482 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
483
484 iwl_dbg_tlv_alloc(trans, tlv, true);
485 }
486
487 return 0;
488}
489
490void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
491{
492 const struct firmware *fw;
493 const char *yoyo_bin = "iwl-debug-yoyo.bin";
494 int res;
495
496 if (!iwlwifi_mod_params.enable_ini ||
497 trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
498 return;
499
500 res = firmware_request_nowarn(&fw, yoyo_bin, dev);
501 IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);
502
503 if (res)
504 return;
505
506 iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);
507
508 release_firmware(fw);
509}
510
511void iwl_dbg_tlv_init(struct iwl_trans *trans)
512{
513 int i;
514
515 INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
516 INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);
517
518 for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
519 struct iwl_dbg_tlv_time_point_data *tp =
520 &trans->dbg.time_point[i];
521
522 INIT_LIST_HEAD(&tp->trig_list);
523 INIT_LIST_HEAD(&tp->hcmd_list);
524 INIT_LIST_HEAD(&tp->active_trig_list);
525 INIT_LIST_HEAD(&tp->config_list);
526 }
527}
528
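/*
 * iwl_dbg_tlv_alloc_fragment - allocate one DRAM fragment
 *
 * Tries to DMA-allocate the requested number of pages, halving the request
 * (rounded up) on each failure, e.g. 256 -> 128 -> 64 pages, until an
 * allocation succeeds or the request drops to a single page.
 *
 * Returns the number of pages allocated on success, or a negative error.
 */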
529static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
530 struct iwl_dram_data *frag, u32 pages)
531{
532 void *block = NULL;
533 dma_addr_t physical;
534
535 if (!frag || frag->size || !pages)
536 return -EIO;
537
538 /*
539 * We try to allocate as many pages as we can, starting with
540 * the requested amount and going down until we can allocate
541 * something. Because of DIV_ROUND_UP(), pages will never go
542 * down to 0 and stop the loop, so stop when pages reaches 1,
543 * which is too small anyway.
544 */
545 while (pages > 1) {
546 block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
547 &physical,
548 GFP_KERNEL | __GFP_NOWARN);
549 if (block)
550 break;
551
552 IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
553 pages * PAGE_SIZE);
554
555 pages = DIV_ROUND_UP(pages, 2);
556 }
557
558 if (!block)
559 return -ENOMEM;
560
561 frag->physical = physical;
562 frag->block = block;
563 frag->size = pages * PAGE_SIZE;
564
565 return pages;
566}
567
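/*
 * iwl_dbg_tlv_alloc_fragments - allocate the DRAM buffer of one allocation ID
 *
 * Splits the requested buffer size from the allocation TLV into up to
 * max_frags_num fragments (capped at BUF_ALLOC_MAX_NUM_FRAGS) and allocates
 * them. Devices before AX210 only support a single DBGC1 fragment. A partial
 * allocation is kept as long as it is not smaller than min_size.
 */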
568static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
569 enum iwl_fw_ini_allocation_id alloc_id)
570{
571 struct iwl_fw_mon *fw_mon;
572 struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
573 u32 num_frags, remain_pages, frag_pages;
574 int i;
575
576 if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
577 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
578 return -EIO;
579
580 fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
581 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
582
583 if (fw_mon->num_frags ||
584 fw_mon_cfg->buf_location !=
585 cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
586 return 0;
587
588 num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
589 if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
590 if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
591 return -EIO;
592 num_frags = 1;
593 } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
594 alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
595 return -EIO;
596 }
597
598 remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
599 PAGE_SIZE);
600 num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
601 num_frags = min_t(u32, num_frags, remain_pages);
602 frag_pages = DIV_ROUND_UP(remain_pages, num_frags);
603
604 fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
605 if (!fw_mon->frags)
606 return -ENOMEM;
607
608 for (i = 0; i < num_frags; i++) {
609 int pages = min_t(u32, frag_pages, remain_pages);
610
611 IWL_DEBUG_FW(fwrt,
612 "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
613 alloc_id, i, pages * PAGE_SIZE);
614
615 pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
616 pages);
617 if (pages < 0) {
618 u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
619 (remain_pages * PAGE_SIZE);
620
621 if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
622 iwl_dbg_tlv_fragments_free(fwrt->trans,
623 alloc_id);
624 return pages;
625 }
626 break;
627 }
628
629 remain_pages -= pages;
630 fw_mon->num_frags++;
631 }
632
633 return 0;
634}
635
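/*
 * iwl_dbg_tlv_apply_buffer - point the firmware at one DRAM allocation
 *
 * Sends BUFFER_ALLOCATION host commands describing the DRAM fragments of the
 * given allocation ID. The first DBGC1 fragment is skipped since it is handed
 * to the firmware through a register or the context info.
 */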
636static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
637 enum iwl_fw_ini_allocation_id alloc_id)
638{
639 struct iwl_fw_mon *fw_mon;
640 u32 remain_frags, num_commands;
641 int i, fw_mon_idx = 0;
642
643 if (!fw_has_capa(&fwrt->fw->ucode_capa,
644 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
645 return 0;
646
647 if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
648 alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
649 return -EIO;
650
651 if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
652 IWL_FW_INI_LOCATION_DRAM_PATH)
653 return 0;
654
655 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
656
657 /* the first fragment of DBGC1 is given to the FW via register
658 * or context info
659 */
660 if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
661 fw_mon_idx++;
662
663 remain_frags = fw_mon->num_frags - fw_mon_idx;
664 if (!remain_frags)
665 return 0;
666
667 num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
668
669 IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
670 alloc_id);
671
672 for (i = 0; i < num_commands; i++) {
673 u32 num_frags = min_t(u32, remain_frags,
674 BUF_ALLOC_MAX_NUM_FRAGS);
675 struct iwl_buf_alloc_cmd data = {
676 .alloc_id = cpu_to_le32(alloc_id),
677 .num_frags = cpu_to_le32(num_frags),
678 .buf_location =
679 cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
680 };
681 struct iwl_host_cmd hcmd = {
682 .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
683 .data[0] = &data,
684 .len[0] = sizeof(data),
685 .flags = CMD_SEND_IN_RFKILL,
686 };
687 int ret, j;
688
689 for (j = 0; j < num_frags; j++) {
690 struct iwl_buf_alloc_frag *frag = &data.frags[j];
691 struct iwl_dram_data *fw_mon_frag =
692 &fw_mon->frags[fw_mon_idx++];
693
694 frag->addr = cpu_to_le64(fw_mon_frag->physical);
695 frag->size = cpu_to_le32(fw_mon_frag->size);
696 }
697 ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
698 if (ret)
699 return ret;
700
701 remain_frags -= num_frags;
702 }
703
704 return 0;
705}
706
707static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
708{
709 int ret, i;
710
711 if (fw_has_capa(&fwrt->fw->ucode_capa,
712 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
713 return;
714
715 for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
716 ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
717 if (ret)
718 IWL_WARN(fwrt,
719 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
720 i, ret);
721 }
722}
723
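/*
 * iwl_dbg_tlv_update_dram - fill the DRAM info entry of one allocation ID
 *
 * Describes the DRAM fragments of the allocation in the iwl_dram_info
 * structure placed in the first DBGC1 fragment, instead of sending
 * BUFFER_ALLOCATION commands.
 */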
724static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
725 enum iwl_fw_ini_allocation_id alloc_id,
726 struct iwl_dram_info *dram_info)
727{
728 struct iwl_fw_mon *fw_mon;
729 u32 remain_frags, num_frags;
730 int j, fw_mon_idx = 0;
731 struct iwl_buf_alloc_cmd *data;
732
733 if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
734 IWL_FW_INI_LOCATION_DRAM_PATH) {
735 IWL_DEBUG_FW(fwrt, "DRAM_PATH is not supported alloc_id %u\n", alloc_id);
736 return -1;
737 }
738
739 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
740
741 /* the first fragment of DBGC1 is given to the FW via register
742 * or context info
743 */
744 if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
745 fw_mon_idx++;
746
747 remain_frags = fw_mon->num_frags - fw_mon_idx;
748 if (!remain_frags)
749 return -1;
750
751 num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
752 data = &dram_info->dram_frags[alloc_id - 1];
753 data->alloc_id = cpu_to_le32(alloc_id);
754 data->num_frags = cpu_to_le32(num_frags);
755 data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH);
756
757 IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n",
758 cpu_to_le32(alloc_id), cpu_to_le32(num_frags));
759
760 for (j = 0; j < num_frags; j++) {
761 struct iwl_buf_alloc_frag *frag = &data->frags[j];
762 struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++];
763
764 frag->addr = cpu_to_le64(fw_mon_frag->physical);
765 frag->size = cpu_to_le32(fw_mon_frag->size);
766 IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n");
767 IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n",
768 j, cpu_to_le64(fw_mon_frag->physical),
769 cpu_to_le32(fw_mon_frag->size));
770 }
771 return 0;
772}
773
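/*
 * iwl_dbg_tlv_update_drams - populate the DRAM info in the DBGC1 buffer
 *
 * For firmware with IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT, writes the magic
 * words and the per-allocation fragment tables at the start of the first
 * DBGC1 fragment; clears the header again if no allocation could be added.
 */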
774static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
775{
776 int ret, i;
777 bool dram_alloc = false;
778 struct iwl_dram_data *frags =
779 &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
780 struct iwl_dram_info *dram_info;
781
782 if (!frags || !frags->block)
783 return;
784
785 dram_info = frags->block;
786
787 if (!fw_has_capa(&fwrt->fw->ucode_capa,
788 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
789 return;
790
791 dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
792 dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
793
794 for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
795 i < IWL_FW_INI_ALLOCATION_NUM; i++) {
796 ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
797 if (!ret)
798 dram_alloc = true;
799 else
800 IWL_WARN(fwrt,
801 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
802 i, ret);
803 }
804
805 if (dram_alloc)
806 IWL_DEBUG_FW(fwrt, "block data after %08x\n",
807 dram_info->first_word);
808 else
809 memset(frags->block, 0, sizeof(*dram_info));
810}
811
812static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
813 struct list_head *hcmd_list)
814{
815 struct iwl_dbg_tlv_node *node;
816
817 list_for_each_entry(node, hcmd_list, list) {
818 struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
819 struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
820 u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
821 struct iwl_host_cmd cmd = {
822 .id = WIDE_ID(hcmd_data->group, hcmd_data->id),
823 .len = { hcmd_len, },
824 .data = { hcmd_data->data, },
825 };
826
827 iwl_trans_send_cmd(fwrt->trans, &cmd);
828 }
829}
830
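/*
 * iwl_dbg_tlv_apply_config - apply the configuration-set TLVs of a time point
 *
 * Each TLV carries a set_type, an address offset and a list of address/value
 * pairs, applied through the matching accessor: periphery (prph) writes,
 * device memory writes, CSR writes, programming of the DBGC1 DRAM address,
 * or the HWM debug token configuration.
 */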
831static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
832 struct list_head *conf_list)
833{
834 struct iwl_dbg_tlv_node *node;
835
836 list_for_each_entry(node, conf_list, list) {
837 struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
838 u32 count, address, value;
839 u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
840 u32 type = le32_to_cpu(config_list->set_type);
841 u32 offset = le32_to_cpu(config_list->addr_offset);
842
843 switch (type) {
844 case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: {
845 if (!iwl_trans_grab_nic_access(fwrt->trans)) {
846 IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n");
847 IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n");
848 continue;
849 }
850 IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len);
851 for (count = 0; count < len; count++) {
852 address = le32_to_cpu(config_list->addr_val[count].address);
853 value = le32_to_cpu(config_list->addr_val[count].value);
854 iwl_trans_write_prph(fwrt->trans, address + offset, value);
855 }
856 iwl_trans_release_nic_access(fwrt->trans);
857 break;
858 }
859 case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: {
860 for (count = 0; count < len; count++) {
861 address = le32_to_cpu(config_list->addr_val[count].address);
862 value = le32_to_cpu(config_list->addr_val[count].value);
863 iwl_trans_write_mem32(fwrt->trans, address + offset, value);
864 IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n",
865 count, address, value);
866 }
867 break;
868 }
869 case IWL_FW_INI_CONFIG_SET_TYPE_CSR: {
870 for (count = 0; count < len; count++) {
871 address = le32_to_cpu(config_list->addr_val[count].address);
872 value = le32_to_cpu(config_list->addr_val[count].value);
873 iwl_write32(fwrt->trans, address + offset, value);
874 IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n",
875 count, address, value);
876 }
877 break;
878 }
879 case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
880 struct iwl_dbgc1_info dram_info = {};
881 struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
882 __le64 dram_base_addr;
883 __le32 dram_size;
884 u64 dram_addr;
885 u32 ret;
886
887 if (!frags)
888 break;
889
890 dram_base_addr = cpu_to_le64(frags->physical);
891 dram_size = cpu_to_le32(frags->size);
892 dram_addr = le64_to_cpu(dram_base_addr);
893
894 IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
895 dram_base_addr, dram_size);
896 IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
897 le32_to_cpu(config_list->addr_offset));
898 for (count = 0; count < len; count++) {
899 address = le32_to_cpu(config_list->addr_val[count].address);
900 dram_info.dbgc1_add_lsb =
901 cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400);
902 dram_info.dbgc1_add_msb =
903 cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32);
904 dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400);
905 ret = iwl_trans_write_mem(fwrt->trans,
906 address + offset, &dram_info, 4);
907 if (ret) {
908 IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n");
909 break;
910 }
911 }
912 break;
913 }
914 case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: {
915 u32 debug_token_config =
916 le32_to_cpu(config_list->addr_val[0].value);
917
918 IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n",
919 debug_token_config);
920 fwrt->trans->dbg.ucode_preset = debug_token_config;
921 break;
922 }
923 default:
924 break;
925 }
926 }
927}
928
929static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
930{
931 struct iwl_dbg_tlv_timer_node *timer_node =
932 from_timer(timer_node, t, timer);
933 struct iwl_fwrt_dump_data dump_data = {
934 .trig = (void *)timer_node->tlv->data,
935 };
936 int ret;
937
938 ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
939 if (!ret || ret == -EBUSY) {
940 u32 occur = le32_to_cpu(dump_data.trig->occurrences);
941 u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
942
943 if (!occur)
944 return;
945
946 mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
947 }
948}
949
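/*
 * iwl_dbg_tlv_set_periodic_trigs - arm timers for periodic triggers
 *
 * For every active trigger of the PERIODIC time point, allocates a timer node
 * and arms it with the trigger's collection interval (data[0], clamped to a
 * minimum of 100 ms).
 */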
950static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
951{
952 struct iwl_dbg_tlv_node *node;
953 struct list_head *trig_list =
954 &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;
955
956 list_for_each_entry(node, trig_list, list) {
957 struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
958 struct iwl_dbg_tlv_timer_node *timer_node;
959 u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
960 u32 min_interval = 100;
961
962 if (!occur)
963 continue;
964
965 /* make sure there is at least one dword of data for the
966 * interval value
967 */
968 if (le32_to_cpu(node->tlv.length) <
969 sizeof(*trig) + sizeof(__le32)) {
970 IWL_ERR(fwrt,
971 "WRT: Invalid periodic trigger data was not given\n");
972 continue;
973 }
974
975 if (le32_to_cpu(trig->data[0]) < min_interval) {
976 IWL_WARN(fwrt,
977 "WRT: Override min interval from %u to %u msec\n",
978 le32_to_cpu(trig->data[0]), min_interval);
979 trig->data[0] = cpu_to_le32(min_interval);
980 }
981
982 collect_interval = le32_to_cpu(trig->data[0]);
983
984 timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
985 if (!timer_node) {
986 IWL_ERR(fwrt,
987 "WRT: Failed to allocate periodic trigger\n");
988 continue;
989 }
990
991 timer_node->fwrt = fwrt;
992 timer_node->tlv = &node->tlv;
993 timer_setup(&timer_node->timer,
994 iwl_dbg_tlv_periodic_trig_handler, 0);
995
996 list_add_tail(&timer_node->list,
997 &fwrt->trans->dbg.periodic_trig_list);
998
999 IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");
1000
1001 mod_timer(&timer_node->timer,
1002 jiffies + msecs_to_jiffies(collect_interval));
1003 }
1004}
1005
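/*
 * is_trig_data_contained - check whether one trigger's data is a subset
 *
 * Returns true if every data dword of the new trigger already appears
 * somewhere in the old trigger's data.
 */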
1006static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
1007 const struct iwl_ucode_tlv *old)
1008{
1009 const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
1010 const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
1011 const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
1012 u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
1013 u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
1014 int i, j;
1015
1016 for (i = 0; i < new_dwords_num; i++) {
1017 bool match = false;
1018
1019 for (j = 0; j < old_dwords_num; j++) {
1020 if (new_data[i] == old_data[j]) {
1021 match = true;
1022 break;
1023 }
1024 }
1025 if (!match)
1026 return false;
1027 }
1028
1029 return true;
1030}
1031
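/*
 * iwl_dbg_tlv_override_trig_node - merge a trigger TLV into an active node
 *
 * Depending on the trigger's apply_policy, appends or overrides the trigger
 * data (reallocating the node if its size changes), optionally overrides the
 * first 11 configuration dwords, and either overrides or ORs in the regions
 * mask.
 */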
1032static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
1033 struct iwl_ucode_tlv *trig_tlv,
1034 struct iwl_dbg_tlv_node *node)
1035{
1036 struct iwl_ucode_tlv *node_tlv = &node->tlv;
1037 struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
1038 struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
1039 u32 policy = le32_to_cpu(trig->apply_policy);
1040 u32 size = le32_to_cpu(trig_tlv->length);
1041 u32 trig_data_len = size - sizeof(*trig);
1042 u32 offset = 0;
1043
1044 if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
1045 u32 data_len = le32_to_cpu(node_tlv->length) -
1046 sizeof(*node_trig);
1047
1048 IWL_DEBUG_FW(fwrt,
1049 "WRT: Appending trigger data (time point %u)\n",
1050 le32_to_cpu(trig->time_point));
1051
1052 offset += data_len;
1053 size += data_len;
1054 } else {
1055 IWL_DEBUG_FW(fwrt,
1056 "WRT: Overriding trigger data (time point %u)\n",
1057 le32_to_cpu(trig->time_point));
1058 }
1059
1060 if (size != le32_to_cpu(node_tlv->length)) {
1061 struct list_head *prev = node->list.prev;
1062 struct iwl_dbg_tlv_node *tmp;
1063
1064 list_del(&node->list);
1065
1066 tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
1067 if (!tmp) {
1068 IWL_WARN(fwrt,
1069 "WRT: No memory to override trigger (time point %u)\n",
1070 le32_to_cpu(trig->time_point));
1071
1072 list_add(&node->list, prev);
1073
1074 return -ENOMEM;
1075 }
1076
1077 list_add(&tmp->list, prev);
1078 node_tlv = &tmp->tlv;
1079 node_trig = (void *)node_tlv->data;
1080 }
1081
1082 memcpy(node_trig->data + offset, trig->data, trig_data_len);
1083 node_tlv->length = cpu_to_le32(size);
1084
1085 if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
1086 IWL_DEBUG_FW(fwrt,
1087 "WRT: Overriding trigger configuration (time point %u)\n",
1088 le32_to_cpu(trig->time_point));
1089
1090 /* the first 11 dwords are configuration related */
1091 memcpy(node_trig, trig, sizeof(__le32) * 11);
1092 }
1093
1094 if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
1095 IWL_DEBUG_FW(fwrt,
1096 "WRT: Overriding trigger regions (time point %u)\n",
1097 le32_to_cpu(trig->time_point));
1098
1099 node_trig->regions_mask = trig->regions_mask;
1100 } else {
1101 IWL_DEBUG_FW(fwrt,
1102 "WRT: Appending trigger regions (time point %u)\n",
1103 le32_to_cpu(trig->time_point));
1104
1105 node_trig->regions_mask |= trig->regions_mask;
1106 }
1107
1108 return 0;
1109}
1110
1111static int
1112iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
1113 struct list_head *trig_list,
1114 struct iwl_ucode_tlv *trig_tlv)
1115{
1116 struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
1117 struct iwl_dbg_tlv_node *node, *match = NULL;
1118 u32 policy = le32_to_cpu(trig->apply_policy);
1119
1120 list_for_each_entry(node, trig_list, list) {
1121 if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
1122 break;
1123
1124 if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
1125 is_trig_data_contained(trig_tlv, &node->tlv)) {
1126 match = node;
1127 break;
1128 }
1129 }
1130
1131 if (!match) {
1132 IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
1133 le32_to_cpu(trig->time_point));
1134 return iwl_dbg_tlv_add(trig_tlv, trig_list);
1135 }
1136
1137 return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
1138}
1139
1140static void
1141iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
1142 struct iwl_dbg_tlv_time_point_data *tp)
1143{
1144 struct iwl_dbg_tlv_node *node;
1145 struct list_head *trig_list = &tp->trig_list;
1146 struct list_head *active_trig_list = &tp->active_trig_list;
1147
1148 list_for_each_entry(node, trig_list, list) {
1149 struct iwl_ucode_tlv *tlv = &node->tlv;
1150
1151 iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
1152 }
1153}
1154
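/*
 * iwl_dbg_tlv_check_fw_pkt - match an RX packet against a trigger's data
 *
 * trig_data encodes the wanted command header; on a cmd/group_id match the
 * packet is duplicated into dump_data->fw_pkt so it can be dumped later.
 */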
1155static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
1156 struct iwl_fwrt_dump_data *dump_data,
1157 union iwl_dbg_tlv_tp_data *tp_data,
1158 u32 trig_data)
1159{
1160 struct iwl_rx_packet *pkt = tp_data->fw_pkt;
1161 struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
1162
1163 if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
1164 pkt->hdr.group_id == wanted_hdr->group_id)) {
1165 struct iwl_rx_packet *fw_pkt =
1166 kmemdup(pkt,
1167 sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
1168 GFP_ATOMIC);
1169
1170 if (!fw_pkt)
1171 return false;
1172
1173 dump_data->fw_pkt = fw_pkt;
1174
1175 return true;
1176 }
1177
1178 return false;
1179}
1180
1181static int
1182iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
1183 struct list_head *active_trig_list,
1184 union iwl_dbg_tlv_tp_data *tp_data,
1185 bool (*data_check)(struct iwl_fw_runtime *fwrt,
1186 struct iwl_fwrt_dump_data *dump_data,
1187 union iwl_dbg_tlv_tp_data *tp_data,
1188 u32 trig_data))
1189{
1190 struct iwl_dbg_tlv_node *node;
1191
1192 list_for_each_entry(node, active_trig_list, list) {
1193 struct iwl_fwrt_dump_data dump_data = {
1194 .trig = (void *)node->tlv.data,
1195 };
1196 u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
1197 data);
1198 int ret, i;
1199 u32 tp = le32_to_cpu(dump_data.trig->time_point);
1200
1202 if (!num_data) {
1203 ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
1204 if (ret)
1205 return ret;
1206 }
1207
1208 for (i = 0; i < num_data; i++) {
1209 if (!data_check ||
1210 data_check(fwrt, &dump_data, tp_data,
1211 le32_to_cpu(dump_data.trig->data[i]))) {
1212 ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
1213 if (ret)
1214 return ret;
1215
1216 break;
1217 }
1218 }
1219
1220 fwrt->trans->dbg.restart_required = false;
1221 IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n",
1222 tp, dump_data.trig->reset_fw);
1223 IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n",
1224 fwrt->trans->dbg.restart_required,
1225 fwrt->trans->dbg.last_tp_resetfw);
1226
1227 if (fwrt->trans->trans_cfg->device_family ==
1228 IWL_DEVICE_FAMILY_9000) {
1229 fwrt->trans->dbg.restart_required = true;
1230 } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
1231 fwrt->trans->dbg.last_tp_resetfw ==
1232 IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
1233 fwrt->trans->dbg.restart_required = false;
1234 fwrt->trans->dbg.last_tp_resetfw = 0xFF;
1235 IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
1236 } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
1237 IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
1238 IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n");
1239 fwrt->trans->dbg.restart_required = true;
1240 } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
1241 IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
1242 IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n");
1243 fwrt->trans->dbg.restart_required = false;
1244 fwrt->trans->dbg.last_tp_resetfw =
1245 le32_to_cpu(dump_data.trig->reset_fw);
1246 } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
1247 IWL_FW_INI_RESET_FW_MODE_NOTHING) {
1248 IWL_DEBUG_INFO(fwrt,
1249 "WRT: nothing need to be done after debug collection\n");
1250 } else {
1251 IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
1252 le32_to_cpu(dump_data.trig->reset_fw));
1253 }
1254 }
1255 return 0;
1256}
1257
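/*
 * iwl_dbg_tlv_init_cfg - resolve the debug destination and allocate buffers
 *
 * Builds the active trigger lists, picks the ini destination from the first
 * valid buffer-location TLV, allocates the DRAM fragments for it, and drops
 * any DRAM_BUFFER region whose allocation could not be satisfied.
 */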
1258static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
1259{
1260 enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
1261 int ret, i;
1262 u32 failed_alloc = 0;
1263
1264 if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
1265 return;
1266
1267 IWL_DEBUG_FW(fwrt,
1268 "WRT: Generating active triggers list, domain 0x%x\n",
1269 fwrt->trans->dbg.domains_bitmap);
1270
1271 for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
1272 struct iwl_dbg_tlv_time_point_data *tp =
1273 &fwrt->trans->dbg.time_point[i];
1274
1275 iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
1276 }
1277
1278 *ini_dest = IWL_FW_INI_LOCATION_INVALID;
1279 for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
1280 struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
1281 &fwrt->trans->dbg.fw_mon_cfg[i];
1282 u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
1283
1284 if (dest == IWL_FW_INI_LOCATION_INVALID) {
1285 failed_alloc |= BIT(i);
1286 continue;
1287 }
1288
1289 if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
1290 *ini_dest = dest;
1291
1292 if (dest != *ini_dest)
1293 continue;
1294
1295 ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
1296
1297 if (ret) {
1298 IWL_WARN(fwrt,
1299 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
1300 i, ret);
1301 failed_alloc |= BIT(i);
1302 }
1303 }
1304
1305 if (!failed_alloc)
1306 return;
1307
1308 for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
1309 struct iwl_fw_ini_region_tlv *reg;
1310 struct iwl_ucode_tlv **active_reg =
1311 &fwrt->trans->dbg.active_regions[i];
1312 u32 reg_type;
1313
1314 if (!*active_reg) {
1315 fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
1316 continue;
1317 }
1318
1319 reg = (void *)(*active_reg)->data;
1320 reg_type = reg->type;
1321
1322 if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
1323 !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
1324 continue;
1325
1326 IWL_DEBUG_FW(fwrt,
1327 "WRT: removing allocation id %d from region id %d\n",
1328 le32_to_cpu(reg->dram_alloc_id), i);
1329
1330 failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
1331 fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
1332
1333 kfree(*active_reg);
1334 *active_reg = NULL;
1335 }
1336}
1337
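/*
 * _iwl_dbg_tlv_time_point - handle a debug time point
 *
 * Runs the per-time-point sequence: for EARLY, resolve the configuration,
 * apply it and publish the DRAM info; for AFTER_ALIVE, apply the buffers,
 * host commands and configuration; for PERIODIC, arm the periodic triggers;
 * otherwise send the host commands, apply the configuration and fire the
 * triggers (matching the firmware packet for response/notification time
 * points).
 */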
1338void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
1339 enum iwl_fw_ini_time_point tp_id,
1340 union iwl_dbg_tlv_tp_data *tp_data,
1341 bool sync)
1342{
1343 struct list_head *hcmd_list, *trig_list, *conf_list;
1344
1345 if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
1346 tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
1347 tp_id >= IWL_FW_INI_TIME_POINT_NUM)
1348 return;
1349
1350 hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
1351 trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
1352 conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list;
1353
1354 switch (tp_id) {
1355 case IWL_FW_INI_TIME_POINT_EARLY:
1356 iwl_dbg_tlv_init_cfg(fwrt);
1357 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1358 iwl_dbg_tlv_update_drams(fwrt);
1359 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
1360 break;
1361 case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
1362 iwl_dbg_tlv_apply_buffers(fwrt);
1363 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1364 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1365 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
1366 break;
1367 case IWL_FW_INI_TIME_POINT_PERIODIC:
1368 iwl_dbg_tlv_set_periodic_trigs(fwrt);
1369 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1370 break;
1371 case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
1372 case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
1373 case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
1374 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1375 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1376 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
1377 iwl_dbg_tlv_check_fw_pkt);
1378 break;
1379 default:
1380 iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
1381 iwl_dbg_tlv_apply_config(fwrt, conf_list);
1382 iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
1383 break;
1384 }
1385}
1386IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);