1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2020, Intel Corporation. */
3
4#include <linux/vmalloc.h>
5
6#include "ice.h"
7#include "ice_lib.h"
8#include "devlink.h"
9#include "devlink_port.h"
10#include "ice_eswitch.h"
11#include "ice_fw_update.h"
12#include "ice_dcb_lib.h"
13#include "ice_sf_eth.h"
14
15/* context for devlink info version reporting */
16struct ice_info_ctx {
17 char buf[128];
18 struct ice_orom_info pending_orom;
19 struct ice_nvm_info pending_nvm;
20 struct ice_netlist_info pending_netlist;
21 struct ice_hw_dev_caps dev_caps;
22};
23
24/* The following functions are used to format specific strings for various
25 * devlink info versions. The ctx parameter is used to provide the storage
26 * buffer, as well as any ancillary information calculated when the info
27 * request was made.
28 *
29 * If a version does not exist, for example when attempting to get the
30 * inactive version of flash when there is no pending update, the function
31 * should leave the buffer in the ctx structure empty.
32 */
33
34static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
35{
36 u8 dsn[8];
37
38 /* Copy the DSN into an array in Big Endian format */
39 put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
40
41 snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
42}
43
44static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
45{
46 struct ice_hw *hw = &pf->hw;
47 int status;
48
49 status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
50 if (status)
51 /* We failed to locate the PBA, so just skip this entry */
52 dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
53 status);
54}
55
56static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
57{
58 struct ice_hw *hw = &pf->hw;
59
60 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
61 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
62}
63
64static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
65{
66 struct ice_hw *hw = &pf->hw;
67
68 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
69 hw->api_min_ver, hw->api_patch);
70}
71
72static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
73{
74 struct ice_hw *hw = &pf->hw;
75
76 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
77}
78
79static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
80{
81 struct ice_orom_info *orom = &pf->hw.flash.orom;
82
83 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
84 orom->major, orom->build, orom->patch);
85}
86
87static void
88ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
89 struct ice_info_ctx *ctx)
90{
91 struct ice_orom_info *orom = &ctx->pending_orom;
92
93 if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
94 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
95 orom->major, orom->build, orom->patch);
96}
97
98static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
99{
100 struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
101
102 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
103}
104
105static void
106ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
107 struct ice_info_ctx *ctx)
108{
109 struct ice_nvm_info *nvm = &ctx->pending_nvm;
110
111 if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
112 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
113 nvm->major, nvm->minor);
114}
115
116static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
117{
118 struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
119
120 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
121}
122
123static void
124ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
125{
126 struct ice_nvm_info *nvm = &ctx->pending_nvm;
127
128 if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
129 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
130}
131
132static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
133{
134 struct ice_hw *hw = &pf->hw;
135
136 snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
137}
138
139static void
140ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
141{
142 struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
143
144 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
145 pkg->major, pkg->minor, pkg->update, pkg->draft);
146}
147
148static void
149ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
150{
151 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
152}
153
154static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
155{
156 struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
157
158 /* The netlist version fields are BCD formatted */
159 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
160 netlist->major, netlist->minor,
161 netlist->type >> 16, netlist->type & 0xFFFF,
162 netlist->rev, netlist->cust_ver);
163}
164
165static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
166{
167 struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
168
169 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
170}
171
172static void
173ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
174 struct ice_info_ctx *ctx)
175{
176 struct ice_netlist_info *netlist = &ctx->pending_netlist;
177
178 /* The netlist version fields are BCD formatted */
179 if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
180 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
181 netlist->major, netlist->minor,
182 netlist->type >> 16, netlist->type & 0xFFFF,
183 netlist->rev, netlist->cust_ver);
184}
185
186static void
187ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
188 struct ice_info_ctx *ctx)
189{
190 struct ice_netlist_info *netlist = &ctx->pending_netlist;
191
192 if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
193 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
194}
195
196static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
197{
198 u32 id, cfg_ver, fw_ver;
199
200 if (!ice_is_feature_supported(pf, ICE_F_CGU))
201 return;
202 if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
203 return;
204 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
205}
206
207static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
208{
209 if (!ice_is_feature_supported(pf, ICE_F_CGU))
210 return;
211 snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
212}
213
214#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
215#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
216#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
217
218/* The combined() macro inserts both the running entry as well as a stored
219 * entry. The running entry will always report the version from the active
220 * handler. The stored entry will first try the pending handler, and fall
221 * back to the active handler if the pending function does not report a version.
222 * The pending handler should check the status of a pending update for the
223 * relevant flash component. It should only fill in the buffer in the case
224 * where a valid pending version is available. This ensures that the related
225 * stored and running versions remain in sync, and that stored versions are
226 * correctly reported as expected.
227 */
228#define combined(key, active, pending) \
229 running(key, active), \
230 stored(key, pending, active)
231
232enum ice_version_type {
233 ICE_VERSION_FIXED,
234 ICE_VERSION_RUNNING,
235 ICE_VERSION_STORED,
236};
237
238static const struct ice_devlink_version {
239 enum ice_version_type type;
240 const char *key;
241 void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
242 void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
243} ice_devlink_versions[] = {
244 fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
245 running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
246 running("fw.mgmt.api", ice_info_fw_api),
247 running("fw.mgmt.build", ice_info_fw_build),
248 combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
249 combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
250 combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
251 running("fw.app.name", ice_info_ddp_pkg_name),
252 running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
253 running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
254 combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
255 combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
256 fixed("cgu.id", ice_info_cgu_id),
257 running("fw.cgu", ice_info_cgu_fw_build),
258};
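
/* Together these entries drive the "devlink dev info" output. A minimal,
 * illustrative example (PCI address and version values are made up):
 *
 *   $ devlink dev info pci/0000:af:00.0
 *   pci/0000:af:00.0:
 *     driver ice
 *     serial_number 00-01-00-ff-ff-02-03-04
 *     versions:
 *       fixed:
 *         board.id K12345-001
 *       running:
 *         fw.mgmt 7.3.2
 *         fw.mgmt.api 1.7.9
 *       stored:
 *         fw.undi 1.3429.0
 */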
259
260/**
261 * ice_devlink_info_get - .info_get devlink handler
262 * @devlink: devlink instance structure
263 * @req: the devlink info request
264 * @extack: extended netdev ack structure
265 *
266 * Callback for the devlink .info_get operation. Reports information about the
267 * device.
268 *
269 * Return: zero on success or an error code on failure.
270 */
271static int ice_devlink_info_get(struct devlink *devlink,
272 struct devlink_info_req *req,
273 struct netlink_ext_ack *extack)
274{
275 struct ice_pf *pf = devlink_priv(devlink);
276 struct device *dev = ice_pf_to_dev(pf);
277 struct ice_hw *hw = &pf->hw;
278 struct ice_info_ctx *ctx;
279 size_t i;
280 int err;
281
282 err = ice_wait_for_reset(pf, 10 * HZ);
283 if (err) {
284 NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
285 return err;
286 }
287
288 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
289 if (!ctx)
290 return -ENOMEM;
291
292 /* discover capabilities first */
293 err = ice_discover_dev_caps(hw, &ctx->dev_caps);
294 if (err) {
295 dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
296 err, ice_aq_str(hw->adminq.sq_last_status));
297 NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
298 goto out_free_ctx;
299 }
300
301 if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
302 err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
303 if (err) {
304 dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
305 err, ice_aq_str(hw->adminq.sq_last_status));
306
307 /* disable display of pending Option ROM */
308 ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
309 }
310 }
311
312 if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
313 err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
314 if (err) {
315 dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
316 err, ice_aq_str(hw->adminq.sq_last_status));
317
318			/* disable display of pending NVM */
319 ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
320 }
321 }
322
323 if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
324 err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
325 if (err) {
326 dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
327 err, ice_aq_str(hw->adminq.sq_last_status));
328
329			/* disable display of pending netlist */
330 ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
331 }
332 }
333
334 ice_info_get_dsn(pf, ctx);
335
336 err = devlink_info_serial_number_put(req, ctx->buf);
337 if (err) {
338 NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
339 goto out_free_ctx;
340 }
341
342 for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
343 enum ice_version_type type = ice_devlink_versions[i].type;
344 const char *key = ice_devlink_versions[i].key;
345
346 memset(ctx->buf, 0, sizeof(ctx->buf));
347
348 ice_devlink_versions[i].getter(pf, ctx);
349
350 /* If the default getter doesn't report a version, use the
351 * fallback function. This is primarily useful in the case of
352 * "stored" versions that want to report the same value as the
353 * running version in the normal case of no pending update.
354 */
355 if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
356 ice_devlink_versions[i].fallback(pf, ctx);
357
358 /* Do not report missing versions */
359 if (ctx->buf[0] == '\0')
360 continue;
361
362 switch (type) {
363 case ICE_VERSION_FIXED:
364 err = devlink_info_version_fixed_put(req, key, ctx->buf);
365 if (err) {
366 NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
367 goto out_free_ctx;
368 }
369 break;
370 case ICE_VERSION_RUNNING:
371 err = devlink_info_version_running_put(req, key, ctx->buf);
372 if (err) {
373 NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
374 goto out_free_ctx;
375 }
376 break;
377 case ICE_VERSION_STORED:
378 err = devlink_info_version_stored_put(req, key, ctx->buf);
379 if (err) {
380 NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
381 goto out_free_ctx;
382 }
383 break;
384 }
385 }
386
387out_free_ctx:
388 kfree(ctx);
389 return err;
390}
391
392/**
393 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
394 * @pf: pointer to the pf instance
395 * @extack: netlink extended ACK structure
396 *
397 * Allow the user to activate new Embedded Management Processor firmware by
398 * issuing a device-specific EMP reset. Called in response to
399 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
400 *
401 * Note that teardown and rebuild of the driver state happens automatically as
402 * part of an interrupt and watchdog task. This is because all physical
403 * functions on the device must be able to reset when an EMP reset occurs from
404 * any source.
405 */
406static int
407ice_devlink_reload_empr_start(struct ice_pf *pf,
408 struct netlink_ext_ack *extack)
409{
410 struct device *dev = ice_pf_to_dev(pf);
411 struct ice_hw *hw = &pf->hw;
412 u8 pending;
413 int err;
414
415 err = ice_get_pending_updates(pf, &pending, extack);
416 if (err)
417 return err;
418
419 /* pending is a bitmask of which flash banks have a pending update,
420 * including the main NVM bank, the Option ROM bank, and the netlist
421 * bank. If any of these bits are set, then there is a pending update
422 * waiting to be activated.
423 */
424 if (!pending) {
425 NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
426 return -ECANCELED;
427 }
428
429 if (pf->fw_emp_reset_disabled) {
430 NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
431 return -ECANCELED;
432 }
433
434 dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");
435
436 err = ice_aq_nvm_update_empr(hw);
437 if (err) {
438 dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
439 err, ice_aq_str(hw->adminq.sq_last_status));
440 NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
441 return err;
442 }
443
444 return 0;
445}
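
/* Illustrative user space trigger for this path (PCI address and firmware
 * file name are made up):
 *
 *   $ devlink dev flash pci/0000:af:00.0 file ice/fw_update.bin
 *   $ devlink dev reload pci/0000:af:00.0 action fw_activate
 */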
446
447/**
448 * ice_devlink_reinit_down - unload given PF
449 * @pf: pointer to the PF struct
450 */
451static void ice_devlink_reinit_down(struct ice_pf *pf)
452{
453 /* No need to take devl_lock, it's already taken by devlink API */
454 ice_unload(pf);
455 rtnl_lock();
456 ice_vsi_decfg(ice_get_main_vsi(pf));
457 rtnl_unlock();
458 ice_deinit_dev(pf);
459}
460
461/**
462 * ice_devlink_reload_down - prepare for reload
463 * @devlink: pointer to the devlink instance to reload
464 * @netns_change: if true, the network namespace is changing
465 * @action: the action to perform
466 * @limit: limits on what reload should do, such as not resetting
467 * @extack: netlink extended ACK structure
468 */
469static int
470ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
471 enum devlink_reload_action action,
472 enum devlink_reload_limit limit,
473 struct netlink_ext_ack *extack)
474{
475 struct ice_pf *pf = devlink_priv(devlink);
476
477 switch (action) {
478 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
479 if (ice_is_eswitch_mode_switchdev(pf)) {
480 NL_SET_ERR_MSG_MOD(extack,
481 "Go to legacy mode before doing reinit");
482 return -EOPNOTSUPP;
483 }
484 if (ice_is_adq_active(pf)) {
485 NL_SET_ERR_MSG_MOD(extack,
486 "Turn off ADQ before doing reinit");
487 return -EOPNOTSUPP;
488 }
489 if (ice_has_vfs(pf)) {
490 NL_SET_ERR_MSG_MOD(extack,
491 "Remove all VFs before doing reinit");
492 return -EOPNOTSUPP;
493 }
494 ice_devlink_reinit_down(pf);
495 return 0;
496 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
497 return ice_devlink_reload_empr_start(pf, extack);
498 default:
499 WARN_ON(1);
500 return -EOPNOTSUPP;
501 }
502}
503
504/**
505 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
506 * @pf: pointer to the pf instance
507 * @extack: netlink extended ACK structure
508 *
509 * Wait for driver to finish rebuilding after EMP reset is completed. This
510 * includes time to wait for both the actual device reset as well as the time
511 * for the driver's rebuild to complete.
512 */
513static int
514ice_devlink_reload_empr_finish(struct ice_pf *pf,
515 struct netlink_ext_ack *extack)
516{
517 int err;
518
519 err = ice_wait_for_reset(pf, 60 * HZ);
520 if (err) {
521 NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
522 return err;
523 }
524
525 return 0;
526}
527
528/**
529 * ice_get_tx_topo_user_sel - Read user's choice from flash
530 * @pf: pointer to pf structure
531 * @layers: value read from flash will be saved here
532 *
533 * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
534 *
535 * Return: zero when read was successful, negative values otherwise.
536 */
537static int ice_get_tx_topo_user_sel(struct ice_pf *pf, u8 *layers)
538{
539 struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
540 struct ice_hw *hw = &pf->hw;
541 int err;
542
543 err = ice_acquire_nvm(hw, ICE_RES_READ);
544 if (err)
545 return err;
546
547 err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
548 sizeof(usr_sel), &usr_sel, true, true, NULL);
549 if (err)
550 goto exit_release_res;
551
552 if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
553 *layers = ICE_SCHED_5_LAYERS;
554 else
555 *layers = ICE_SCHED_9_LAYERS;
556
557exit_release_res:
558 ice_release_nvm(hw);
559
560 return err;
561}
562
563/**
564 * ice_update_tx_topo_user_sel - Save user's preference in flash
565 * @pf: pointer to pf structure
566 * @layers: value to be saved in flash
567 *
568 * The @layers argument defines the user's preference for the number of layers
569 * in the Tx Scheduler Topology Tree. This choice is stored in the PFA TLV
570 * field and is picked up by the driver during the next init.
571 *
572 * Return: zero when save was successful, negative values otherwise.
573 */
574static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
575{
576 struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
577 struct ice_hw *hw = &pf->hw;
578 int err;
579
580 err = ice_acquire_nvm(hw, ICE_RES_WRITE);
581 if (err)
582 return err;
583
584 err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
585 sizeof(usr_sel), &usr_sel, true, true, NULL);
586 if (err)
587 goto exit_release_res;
588
589 if (layers == ICE_SCHED_5_LAYERS)
590 usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
591 else
592 usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
593
594 err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
595 sizeof(usr_sel.data), &usr_sel.data,
596 true, NULL, NULL);
597exit_release_res:
598 ice_release_nvm(hw);
599
600 return err;
601}
602
603/**
604 * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
605 * @devlink: pointer to the devlink instance
606 * @id: the parameter ID to get
607 * @ctx: context to store the parameter value
608 *
609 * Return: zero on success and negative value on failure.
610 */
611static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
612 struct devlink_param_gset_ctx *ctx)
613{
614 struct ice_pf *pf = devlink_priv(devlink);
615 int err;
616
617 err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
618 if (err)
619 return err;
620
621 return 0;
622}
623
624/**
625 * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
626 * @devlink: pointer to the devlink instance
627 * @id: the parameter ID to set
628 * @ctx: context to get the parameter value
629 * @extack: netlink extended ACK structure
630 *
631 * Return: zero on success and negative value on failure.
632 */
633static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
634 struct devlink_param_gset_ctx *ctx,
635 struct netlink_ext_ack *extack)
636{
637 struct ice_pf *pf = devlink_priv(devlink);
638 int err;
639
640 err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
641 if (err)
642 return err;
643
644 NL_SET_ERR_MSG_MOD(extack,
645			   "Tx scheduling layers have been changed on this device. You must power cycle the PCI slot for the change to take effect.");
646
647 return 0;
648}
649
650/**
651 * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
652 * parameter value
653 * @devlink: unused pointer to devlink instance
654 * @id: the parameter ID to validate
655 * @val: value to validate
656 * @extack: netlink extended ACK structure
657 *
658 * Supported values are:
659 * - 5 - five layers Tx Scheduler Topology Tree
660 * - 9 - nine layers Tx Scheduler Topology Tree
661 *
662 * Return: zero when passed parameter value is supported. Negative value on
663 * error.
664 */
665static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
666 union devlink_param_value val,
667 struct netlink_ext_ack *extack)
668{
669 if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
670 NL_SET_ERR_MSG_MOD(extack,
671 "Wrong number of tx scheduler layers provided.");
672 return -EINVAL;
673 }
674
675 return 0;
676}
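
/* Example of changing the parameter from user space (illustrative address).
 * The value is stored in the NVM and only takes effect after a PCI slot power
 * cycle:
 *
 *   $ devlink dev param set pci/0000:af:00.0 name tx_scheduling_layers \
 *         value 5 cmode permanent
 */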
677
678/**
679 * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
680 * @pf: pf struct
681 *
682 * This function tears down the rate tree exported during VF creation.
683 */
684void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
685{
686 struct devlink *devlink;
687 struct ice_vf *vf;
688 unsigned int bkt;
689
690 devlink = priv_to_devlink(pf);
691
692 devl_lock(devlink);
693 mutex_lock(&pf->vfs.table_lock);
694 ice_for_each_vf(pf, bkt, vf) {
695 if (vf->devlink_port.devlink_rate)
696 devl_rate_leaf_destroy(&vf->devlink_port);
697 }
698 mutex_unlock(&pf->vfs.table_lock);
699
700 devl_rate_nodes_destroy(devlink);
701 devl_unlock(devlink);
702}
703
704/**
705 * ice_enable_custom_tx - try to enable custom Tx feature
706 * @pf: pf struct
707 *
708 * This function tries to enable the custom Tx feature, which is not
709 * possible when DCB or ADQ is active.
710 */
711static bool ice_enable_custom_tx(struct ice_pf *pf)
712{
713 struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
714 struct device *dev = ice_pf_to_dev(pf);
715
716 if (pi->is_custom_tx_enabled)
717 /* already enabled, return true */
718 return true;
719
720 if (ice_is_adq_active(pf)) {
721 dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
722 return false;
723 }
724
725 if (ice_is_dcb_active(pf)) {
726 dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
727 return false;
728 }
729
730 pi->is_custom_tx_enabled = true;
731
732 return true;
733}
734
735/**
736 * ice_traverse_tx_tree - traverse Tx scheduler tree
737 * @devlink: devlink struct
738 * @node: current node, used for recursion
739 * @tc_node: tc_node struct, that is treated as a root
740 * @pf: pf struct
741 *
742 * This function traverses the Tx scheduler tree and exports the
743 * entire structure to devlink-rate.
744 */
745static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
746 struct ice_sched_node *tc_node, struct ice_pf *pf)
747{
748 struct devlink_rate *rate_node = NULL;
749 struct ice_dynamic_port *sf;
750 struct ice_vf *vf;
751 int i;
752
753 if (node->rate_node)
754 /* already added, skip to the next */
755 goto traverse_children;
756
757 if (node->parent == tc_node) {
758 /* create root node */
759 rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
760 } else if (node->vsi_handle &&
761 pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
762 pf->vsi[node->vsi_handle]->vf) {
763 vf = pf->vsi[node->vsi_handle]->vf;
764 if (!vf->devlink_port.devlink_rate)
765			/* leaf nodes don't have children
766 * so we don't set rate_node
767 */
768 devl_rate_leaf_create(&vf->devlink_port, node,
769 node->parent->rate_node);
770 } else if (node->vsi_handle &&
771 pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
772 pf->vsi[node->vsi_handle]->sf) {
773 sf = pf->vsi[node->vsi_handle]->sf;
774 if (!sf->devlink_port.devlink_rate)
775			/* leaf nodes don't have children
776 * so we don't set rate_node
777 */
778 devl_rate_leaf_create(&sf->devlink_port, node,
779 node->parent->rate_node);
780 } else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
781 node->parent->rate_node) {
782 rate_node = devl_rate_node_create(devlink, node, node->name,
783 node->parent->rate_node);
784 }
785
786 if (rate_node && !IS_ERR(rate_node))
787 node->rate_node = rate_node;
788
789traverse_children:
790 for (i = 0; i < node->num_children; i++)
791 ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
792}
793
794/**
795 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
796 * @devlink: devlink struct
797 * @vsi: main vsi struct
798 *
799 * This function finds a root node, then calls ice_traverse_tx_tree(), which
800 * traverses the tree and exports its contents to devlink-rate.
801 */
802int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
803{
804 struct ice_port_info *pi = vsi->port_info;
805 struct ice_sched_node *tc_node;
806 struct ice_pf *pf = vsi->back;
807 int i;
808
809 tc_node = pi->root->children[0];
810 mutex_lock(&pi->sched_lock);
811 for (i = 0; i < tc_node->num_children; i++)
812 ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
813 mutex_unlock(&pi->sched_lock);
814
815 return 0;
816}
817
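/* Recursively clear the devlink-rate back-pointers in the subtree under @node */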
818static void ice_clear_rate_nodes(struct ice_sched_node *node)
819{
820 node->rate_node = NULL;
821
822 for (int i = 0; i < node->num_children; i++)
823 ice_clear_rate_nodes(node->children[i]);
824}
825
826/**
827 * ice_devlink_rate_clear_tx_topology - clear node->rate_node
828 * @vsi: main vsi struct
829 *
830 * Clear rate_node to clean up the created Tx topology.
832 */
833void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
834{
835 struct ice_port_info *pi = vsi->port_info;
836
837 mutex_lock(&pi->sched_lock);
838 ice_clear_rate_nodes(pi->root->children[0]);
839 mutex_unlock(&pi->sched_lock);
840}
841
842/**
843 * ice_set_object_tx_share - sets node scheduling parameter
844 * @pi: port info struct instance
845 * @node: node struct instance
846 * @bw: bandwidth in bytes per second
847 * @extack: extended netdev ack structure
848 *
849 * This function sets ICE_MIN_BW scheduling BW limit.
850 */
851static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
852 u64 bw, struct netlink_ext_ack *extack)
853{
854 int status;
855
856 mutex_lock(&pi->sched_lock);
857 /* converts bytes per second to kilo bits per second */
858 node->tx_share = div_u64(bw, 125);
859 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
860 mutex_unlock(&pi->sched_lock);
861
862 if (status)
863 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");
864
865 return status;
866}
867
868/**
869 * ice_set_object_tx_max - sets node scheduling parameter
870 * @pi: port info struct instance
871 * @node: node struct instance
872 * @bw: bandwidth in bytes per second
873 * @extack: extended netdev ack structure
874 *
875 * This function sets ICE_MAX_BW scheduling BW limit.
876 */
877static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
878 u64 bw, struct netlink_ext_ack *extack)
879{
880 int status;
881
882 mutex_lock(&pi->sched_lock);
883 /* converts bytes per second value to kilo bits per second */
884 node->tx_max = div_u64(bw, 125);
885 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
886 mutex_unlock(&pi->sched_lock);
887
888 if (status)
889 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");
890
891 return status;
892}
893
894/**
895 * ice_set_object_tx_priority - sets node scheduling parameter
896 * @pi: port info struct instance
897 * @node: node struct instance
898 * @priority: value representing priority for strict priority arbitration
899 * @extack: extended netdev ack structure
900 *
901 * This function sets priority of node among siblings.
902 */
903static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
904 u32 priority, struct netlink_ext_ack *extack)
905{
906 int status;
907
908 if (priority >= 8) {
909 NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
910 return -EINVAL;
911 }
912
913 mutex_lock(&pi->sched_lock);
914 node->tx_priority = priority;
915 status = ice_sched_set_node_priority(pi, node, node->tx_priority);
916 mutex_unlock(&pi->sched_lock);
917
918 if (status)
919 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");
920
921 return status;
922}
923
924/**
925 * ice_set_object_tx_weight - sets node scheduling parameter
926 * @pi: port info struct instance
927 * @node: node struct instance
928 * @weight: value representing relative weight for WFQ arbitration
929 * @extack: extended netdev ack structure
930 *
931 * This function sets node weight for WFQ algorithm.
932 */
933static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
934 u32 weight, struct netlink_ext_ack *extack)
935{
936 int status;
937
938 if (weight > 200 || weight < 1) {
939 NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
940 return -EINVAL;
941 }
942
943 mutex_lock(&pi->sched_lock);
944 node->tx_weight = weight;
945 status = ice_sched_set_node_weight(pi, node, node->tx_weight);
946 mutex_unlock(&pi->sched_lock);
947
948 if (status)
949 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");
950
951 return status;
952}
953
954/**
955 * ice_get_pi_from_dev_rate - get port info from devlink_rate
956 * @rate_node: devlink rate struct instance
957 *
958 * Return: the port_info struct corresponding to the given devlink_rate node.
959 */
960static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
961{
962 struct ice_pf *pf = devlink_priv(rate_node->devlink);
963
964 return ice_get_main_vsi(pf)->port_info;
965}
966
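/**
 * ice_devlink_rate_node_new - devlink rate node_new callback
 * @rate_node: devlink rate node being created
 * @priv: on success, points to the preallocated scheduler node
 * @extack: netlink extended ACK structure
 *
 * Only preallocates memory for an ice_sched_node here; the node is added to
 * the HW scheduler tree later, once a parent is assigned.
 *
 * Return: zero on success or an error code on failure.
 */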
967static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
968 struct netlink_ext_ack *extack)
969{
970 struct ice_sched_node *node;
971 struct ice_port_info *pi;
972
973 pi = ice_get_pi_from_dev_rate(rate_node);
974
975 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
976 return -EBUSY;
977
978 /* preallocate memory for ice_sched_node */
979 node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
980 if (!node)
981 return -ENOMEM;
982
983 *priv = node;
984
985 return 0;
986}
987
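/**
 * ice_devlink_rate_node_del - devlink rate node_del callback
 * @rate_node: devlink rate node being deleted
 * @priv: scheduler node backing @rate_node
 * @extack: netlink extended ACK structure
 *
 * Frees the backing scheduler node. Deleting a node that still has children
 * is rejected.
 *
 * Return: zero on success or an error code on failure.
 */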
988static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
989 struct netlink_ext_ack *extack)
990{
991 struct ice_sched_node *node, *tc_node;
992 struct ice_port_info *pi;
993
994 pi = ice_get_pi_from_dev_rate(rate_node);
995 tc_node = pi->root->children[0];
996 node = priv;
997
998 if (!rate_node->parent || !node || tc_node == node || !extack)
999 return 0;
1000
1001 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
1002 return -EBUSY;
1003
1004	/* deleting a node that still has children is not allowed */
1005 if (node->num_children)
1006 return -EINVAL;
1007
1008 mutex_lock(&pi->sched_lock);
1009 ice_free_sched_node(pi, node);
1010 mutex_unlock(&pi->sched_lock);
1011
1012 return 0;
1013}
1014
1015static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
1016 u64 tx_max, struct netlink_ext_ack *extack)
1017{
1018 struct ice_sched_node *node = priv;
1019
1020 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
1021 return -EBUSY;
1022
1023 if (!node)
1024 return 0;
1025
1026 return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
1027 node, tx_max, extack);
1028}
1029
1030static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
1031 u64 tx_share, struct netlink_ext_ack *extack)
1032{
1033 struct ice_sched_node *node = priv;
1034
1035 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
1036 return -EBUSY;
1037
1038 if (!node)
1039 return 0;
1040
1041 return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
1042 tx_share, extack);
1043}
1044
1045static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
1046 u32 tx_priority, struct netlink_ext_ack *extack)
1047{
1048 struct ice_sched_node *node = priv;
1049
1050 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
1051 return -EBUSY;
1052
1053 if (!node)
1054 return 0;
1055
1056 return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
1057 tx_priority, extack);
1058}
1059
1060static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
1061 u32 tx_weight, struct netlink_ext_ack *extack)
1062{
1063 struct ice_sched_node *node = priv;
1064
1065 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
1066 return -EBUSY;
1067
1068 if (!node)
1069 return 0;
1070
1071 return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
1072 tx_weight, extack);
1073}
1074
1075static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
1076 u64 tx_max, struct netlink_ext_ack *extack)
1077{
1078 struct ice_sched_node *node = priv;
1079
1080 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
1081 return -EBUSY;
1082
1083 if (!node)
1084 return 0;
1085
1086 return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
1087 node, tx_max, extack);
1088}
1089
1090static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
1091 u64 tx_share, struct netlink_ext_ack *extack)
1092{
1093 struct ice_sched_node *node = priv;
1094
1095 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
1096 return -EBUSY;
1097
1098 if (!node)
1099 return 0;
1100
1101 return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
1102 node, tx_share, extack);
1103}
1104
1105static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
1106 u32 tx_priority, struct netlink_ext_ack *extack)
1107{
1108 struct ice_sched_node *node = priv;
1109
1110 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
1111 return -EBUSY;
1112
1113 if (!node)
1114 return 0;
1115
1116 return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
1117 node, tx_priority, extack);
1118}
1119
1120static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
1121 u32 tx_weight, struct netlink_ext_ack *extack)
1122{
1123 struct ice_sched_node *node = priv;
1124
1125 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
1126 return -EBUSY;
1127
1128 if (!node)
1129 return 0;
1130
1131 return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
1132 node, tx_weight, extack);
1133}
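
/* The setters above back the devlink-rate interface. An illustrative user
 * space sequence (device address, port index and node name are made up):
 *
 *   $ devlink port function rate add pci/0000:af:00.0/node_custom
 *   $ devlink port function rate set pci/0000:af:00.0/node_custom tx_max 100mbit
 *   $ devlink port function rate set pci/0000:af:00.0/1 parent node_custom
 */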
1134
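/**
 * ice_devlink_set_parent - devlink rate parent_set callback
 * @devlink_rate: devlink rate entry being re-parented
 * @parent: new parent devlink rate node, or NULL to detach
 * @priv: scheduler node backing @devlink_rate
 * @parent_priv: scheduler node backing @parent
 * @extack: netlink extended ACK structure
 *
 * With a NULL @parent the scheduler node is freed. Otherwise the node is
 * created under the new parent if it does not exist yet, or moved to it.
 *
 * Return: zero on success or an error code on failure.
 */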
1135static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
1136 struct devlink_rate *parent,
1137 void *priv, void *parent_priv,
1138 struct netlink_ext_ack *extack)
1139{
1140 struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
1141 struct ice_sched_node *tc_node, *node, *parent_node;
1142 u16 num_nodes_added;
1143 u32 first_node_teid;
1144 u32 node_teid;
1145 int status;
1146
1147 tc_node = pi->root->children[0];
1148 node = priv;
1149
1150 if (!extack)
1151 return 0;
1152
1153 if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
1154 return -EBUSY;
1155
1156 if (!parent) {
1157 if (!node || tc_node == node || node->num_children)
1158 return -EINVAL;
1159
1160 mutex_lock(&pi->sched_lock);
1161 ice_free_sched_node(pi, node);
1162 mutex_unlock(&pi->sched_lock);
1163
1164 return 0;
1165 }
1166
1167 parent_node = parent_priv;
1168
1169 /* if the node doesn't exist, create it */
1170 if (!node->parent) {
1171 mutex_lock(&pi->sched_lock);
1172 status = ice_sched_add_elems(pi, tc_node, parent_node,
1173 parent_node->tx_sched_layer + 1,
1174 1, &num_nodes_added, &first_node_teid,
1175 &node);
1176 mutex_unlock(&pi->sched_lock);
1177
1178 if (status) {
1179 NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
1180 return status;
1181 }
1182
1183 if (devlink_rate->tx_share)
1184 ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
1185 if (devlink_rate->tx_max)
1186 ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
1187 if (devlink_rate->tx_priority)
1188 ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
1189 if (devlink_rate->tx_weight)
1190 ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
1191 } else {
1192 node_teid = le32_to_cpu(node->info.node_teid);
1193 mutex_lock(&pi->sched_lock);
1194 status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
1195 mutex_unlock(&pi->sched_lock);
1196
1197 if (status)
1198 NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
1199 }
1200
1201 return status;
1202}
1203
1204/**
1205 * ice_devlink_reinit_up - do reinit of the given PF
1206 * @pf: pointer to the PF struct
1207 */
1208static int ice_devlink_reinit_up(struct ice_pf *pf)
1209{
1210 struct ice_vsi *vsi = ice_get_main_vsi(pf);
1211 int err;
1212
1213 err = ice_init_dev(pf);
1214 if (err)
1215 return err;
1216
1217 vsi->flags = ICE_VSI_FLAG_INIT;
1218
1219 rtnl_lock();
1220 err = ice_vsi_cfg(vsi);
1221 rtnl_unlock();
1222 if (err)
1223 goto err_vsi_cfg;
1224
1225 /* No need to take devl_lock, it's already taken by devlink API */
1226 err = ice_load(pf);
1227 if (err)
1228 goto err_load;
1229
1230 return 0;
1231
1232err_load:
1233 rtnl_lock();
1234 ice_vsi_decfg(vsi);
1235 rtnl_unlock();
1236err_vsi_cfg:
1237 ice_deinit_dev(pf);
1238 return err;
1239}
1240
1241/**
1242 * ice_devlink_reload_up - do reload up after reinit
1243 * @devlink: pointer to the devlink instance reloading
1244 * @action: the action requested
1245 * @limit: limits imposed by userspace, such as not resetting
1246 * @actions_performed: on return, indicates which actions were actually performed
1247 * @extack: netlink extended ACK structure
1248 */
1249static int
1250ice_devlink_reload_up(struct devlink *devlink,
1251 enum devlink_reload_action action,
1252 enum devlink_reload_limit limit,
1253 u32 *actions_performed,
1254 struct netlink_ext_ack *extack)
1255{
1256 struct ice_pf *pf = devlink_priv(devlink);
1257
1258 switch (action) {
1259 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
1260 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
1261 return ice_devlink_reinit_up(pf);
1262 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
1263 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
1264 return ice_devlink_reload_empr_finish(pf, extack);
1265 default:
1266 WARN_ON(1);
1267 return -EOPNOTSUPP;
1268 }
1269}
1270
1271static const struct devlink_ops ice_devlink_ops = {
1272 .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
1273 .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1274 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
1275 .reload_down = ice_devlink_reload_down,
1276 .reload_up = ice_devlink_reload_up,
1277 .eswitch_mode_get = ice_eswitch_mode_get,
1278 .eswitch_mode_set = ice_eswitch_mode_set,
1279 .info_get = ice_devlink_info_get,
1280 .flash_update = ice_devlink_flash_update,
1281
1282 .rate_node_new = ice_devlink_rate_node_new,
1283 .rate_node_del = ice_devlink_rate_node_del,
1284
1285 .rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
1286 .rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
1287 .rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
1288 .rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,
1289
1290 .rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
1291 .rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
1292 .rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
1293 .rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,
1294
1295 .rate_leaf_parent_set = ice_devlink_set_parent,
1296 .rate_node_parent_set = ice_devlink_set_parent,
1297
1298 .port_new = ice_devlink_port_new,
1299};
1300
1301static const struct devlink_ops ice_sf_devlink_ops;
1302
1303static int
1304ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
1305 struct devlink_param_gset_ctx *ctx)
1306{
1307 struct ice_pf *pf = devlink_priv(devlink);
1308
1309 ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
1310
1311 return 0;
1312}
1313
1314static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
1315 struct devlink_param_gset_ctx *ctx,
1316 struct netlink_ext_ack *extack)
1317{
1318 struct ice_pf *pf = devlink_priv(devlink);
1319 bool roce_ena = ctx->val.vbool;
1320 int ret;
1321
1322 if (!roce_ena) {
1323 ice_unplug_aux_dev(pf);
1324 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
1325 return 0;
1326 }
1327
1328 pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
1329 ret = ice_plug_aux_dev(pf);
1330 if (ret)
1331 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
1332
1333 return ret;
1334}
1335
1336static int
1337ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
1338 union devlink_param_value val,
1339 struct netlink_ext_ack *extack)
1340{
1341 struct ice_pf *pf = devlink_priv(devlink);
1342
1343 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
1344 return -EOPNOTSUPP;
1345
1346 if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
1347 NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
1348 return -EOPNOTSUPP;
1349 }
1350
1351 return 0;
1352}
1353
1354static int
1355ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
1356 struct devlink_param_gset_ctx *ctx)
1357{
1358 struct ice_pf *pf = devlink_priv(devlink);
1359
1360 ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
1361
1362 return 0;
1363}
1364
1365static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
1366 struct devlink_param_gset_ctx *ctx,
1367 struct netlink_ext_ack *extack)
1368{
1369 struct ice_pf *pf = devlink_priv(devlink);
1370 bool iw_ena = ctx->val.vbool;
1371 int ret;
1372
1373 if (!iw_ena) {
1374 ice_unplug_aux_dev(pf);
1375 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
1376 return 0;
1377 }
1378
1379 pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
1380 ret = ice_plug_aux_dev(pf);
1381 if (ret)
1382 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
1383
1384 return ret;
1385}
1386
1387static int
1388ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
1389 union devlink_param_value val,
1390 struct netlink_ext_ack *extack)
1391{
1392 struct ice_pf *pf = devlink_priv(devlink);
1393
1394 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
1395 return -EOPNOTSUPP;
1396
1397 if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
1398 NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
1399 return -EOPNOTSUPP;
1400 }
1401
1402 return 0;
1403}
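
/* Example of toggling one of the RDMA protocols at runtime (illustrative
 * address). enable_roce and enable_iwarp are mutually exclusive:
 *
 *   $ devlink dev param set pci/0000:af:00.0 name enable_roce value true cmode runtime
 */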
1404
1405#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
1406#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
1407#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"
1408
1409/**
1410 * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
1411 * @mode: local forwarding mode used in the port_info struct.
1412 *
1413 * Return: the string corresponding to @mode, or "Invalid".
1414 */
1415static const char *
1416ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
1417{
1418 switch (mode) {
1419 case ICE_LOCAL_FWD_MODE_ENABLED:
1420 return DEVLINK_LOCAL_FWD_ENABLED_STR;
1421 case ICE_LOCAL_FWD_MODE_PRIORITIZED:
1422 return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
1423 case ICE_LOCAL_FWD_MODE_DISABLED:
1424 return DEVLINK_LOCAL_FWD_DISABLED_STR;
1425 }
1426
1427 return "Invalid";
1428}
1429
1430/**
1431 * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
1432 * @mode_str: local forwarding mode string.
1433 *
1434 * Return: Mode value or negative number if invalid.
1435 */
1436static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
1437{
1438 if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
1439 return ICE_LOCAL_FWD_MODE_ENABLED;
1440 else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
1441 return ICE_LOCAL_FWD_MODE_PRIORITIZED;
1442 else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
1443 return ICE_LOCAL_FWD_MODE_DISABLED;
1444
1445 return -EINVAL;
1446}
1447
1448/**
1449 * ice_devlink_local_fwd_get - Get local_fwd parameter.
1450 * @devlink: Pointer to the devlink instance.
1451 * @id: The parameter ID to get.
1452 * @ctx: Context to store the parameter value.
1453 *
1454 * Return: Zero.
1455 */
1456static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
1457 struct devlink_param_gset_ctx *ctx)
1458{
1459 struct ice_pf *pf = devlink_priv(devlink);
1460 struct ice_port_info *pi;
1461 const char *mode_str;
1462
1463 pi = pf->hw.port_info;
1464 mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
1465 snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);
1466
1467 return 0;
1468}
1469
1470/**
1471 * ice_devlink_local_fwd_set - Set local_fwd parameter.
1472 * @devlink: Pointer to the devlink instance.
1473 * @id: The parameter ID to set.
1474 * @ctx: Context to get the parameter value.
1475 * @extack: Netlink extended ACK structure.
1476 *
1477 * Return: Zero.
1478 */
1479static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
1480 struct devlink_param_gset_ctx *ctx,
1481 struct netlink_ext_ack *extack)
1482{
1483 int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
1484 struct ice_pf *pf = devlink_priv(devlink);
1485 struct device *dev = ice_pf_to_dev(pf);
1486 struct ice_port_info *pi;
1487
1488 pi = pf->hw.port_info;
1489 if (pi->local_fwd_mode != new_local_fwd_mode) {
1490 pi->local_fwd_mode = new_local_fwd_mode;
1491 dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
1492 ice_schedule_reset(pf, ICE_RESET_CORER);
1493 }
1494
1495 return 0;
1496}
1497
1498/**
1499 * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
1500 * @devlink: Unused pointer to devlink instance.
1501 * @id: The parameter ID to validate.
1502 * @val: Value to validate.
1503 * @extack: Netlink extended ACK structure.
1504 *
1505 * Supported values are:
1506 * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled
1507 * "prioritized" - local_fwd traffic is prioritized in scheduling.
1508 *
1509 * Return: Zero when passed parameter value is supported. Negative value on
1510 * error.
1511 */
1512static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
1513 union devlink_param_value val,
1514 struct netlink_ext_ack *extack)
1515{
1516 if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
1517 NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
1518 return -EINVAL;
1519 }
1520
1521 return 0;
1522}
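
/* Example of selecting the local forwarding mode (illustrative address). The
 * change schedules a CORER reset to take effect:
 *
 *   $ devlink dev param set pci/0000:af:00.0 name local_forwarding \
 *         value prioritized cmode runtime
 */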
1523
1524enum ice_param_id {
1525 ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
1526 ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
1527 ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
1528};
1529
1530static const struct devlink_param ice_dvl_rdma_params[] = {
1531 DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1532 ice_devlink_enable_roce_get,
1533 ice_devlink_enable_roce_set,
1534 ice_devlink_enable_roce_validate),
1535 DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1536 ice_devlink_enable_iw_get,
1537 ice_devlink_enable_iw_set,
1538 ice_devlink_enable_iw_validate),
1539};
1540
1541static const struct devlink_param ice_dvl_sched_params[] = {
1542 DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
1543 "tx_scheduling_layers",
1544 DEVLINK_PARAM_TYPE_U8,
1545 BIT(DEVLINK_PARAM_CMODE_PERMANENT),
1546 ice_devlink_tx_sched_layers_get,
1547 ice_devlink_tx_sched_layers_set,
1548 ice_devlink_tx_sched_layers_validate),
1549 DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
1550 "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
1551 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1552 ice_devlink_local_fwd_get,
1553 ice_devlink_local_fwd_set,
1554 ice_devlink_local_fwd_validate),
1555};
1556
1557static void ice_devlink_free(void *devlink_ptr)
1558{
1559 devlink_free((struct devlink *)devlink_ptr);
1560}
1561
1562/**
1563 * ice_allocate_pf - Allocate devlink and return PF structure pointer
1564 * @dev: the device to allocate for
1565 *
1566 * Allocate a devlink instance for this device and return the private area as
1567 * the PF structure. The devlink memory is kept track of through devres by
1568 * adding an action to remove it when unwinding.
1569 */
1570struct ice_pf *ice_allocate_pf(struct device *dev)
1571{
1572 struct devlink *devlink;
1573
1574 devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
1575 if (!devlink)
1576 return NULL;
1577
1578 /* Add an action to teardown the devlink when unwinding the driver */
1579 if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
1580 return NULL;
1581
1582 return devlink_priv(devlink);
1583}
1584
1585/**
1586 * ice_allocate_sf - Allocate devlink and return SF structure pointer
1587 * @dev: the device to allocate for
1588 * @pf: pointer to the PF structure
1589 *
1590 * Allocate a devlink instance for SF.
1591 *
1592 * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
1593 */
1594struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
1595{
1596 struct devlink *devlink;
1597 int err;
1598
1599 devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
1600 dev);
1601 if (!devlink)
1602 return ERR_PTR(-ENOMEM);
1603
1604 err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
1605 if (err) {
1606 devlink_free(devlink);
1607 return ERR_PTR(err);
1608 }
1609
1610 return devlink_priv(devlink);
1611}
1612
1613/**
1614 * ice_devlink_register - Register devlink interface for this PF
1615 * @pf: the PF to register the devlink for.
1616 *
1617 * Register the devlink instance associated with this physical function.
1618 */
1621void ice_devlink_register(struct ice_pf *pf)
1622{
1623 struct devlink *devlink = priv_to_devlink(pf);
1624
1625 devl_register(devlink);
1626}
1627
1628/**
1629 * ice_devlink_unregister - Unregister devlink resources for this PF.
1630 * @pf: the PF structure to cleanup
1631 *
1632 * Unregisters the devlink instance associated with this PF.
1633 */
1634void ice_devlink_unregister(struct ice_pf *pf)
1635{
1636 devl_unregister(priv_to_devlink(pf));
1637}
1638
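/**
 * ice_devlink_register_params - Register devlink parameters for this PF
 * @pf: the PF structure to register parameters for
 *
 * Registers the RDMA parameters unconditionally, and the Tx scheduler
 * parameters only when the firmware supports changing the topology.
 *
 * Return: zero on success or an error code on failure.
 */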
1639int ice_devlink_register_params(struct ice_pf *pf)
1640{
1641 struct devlink *devlink = priv_to_devlink(pf);
1642 struct ice_hw *hw = &pf->hw;
1643 int status;
1644
1645 status = devl_params_register(devlink, ice_dvl_rdma_params,
1646 ARRAY_SIZE(ice_dvl_rdma_params));
1647 if (status)
1648 return status;
1649
1650 if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
1651 status = devl_params_register(devlink, ice_dvl_sched_params,
1652 ARRAY_SIZE(ice_dvl_sched_params));
1653
1654 return status;
1655}
1656
1657void ice_devlink_unregister_params(struct ice_pf *pf)
1658{
1659 struct devlink *devlink = priv_to_devlink(pf);
1660 struct ice_hw *hw = &pf->hw;
1661
1662 devl_params_unregister(devlink, ice_dvl_rdma_params,
1663 ARRAY_SIZE(ice_dvl_rdma_params));
1664
1665 if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
1666 devl_params_unregister(devlink, ice_dvl_sched_params,
1667 ARRAY_SIZE(ice_dvl_sched_params));
1668}
1669
1670#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
1671
1672static const struct devlink_region_ops ice_nvm_region_ops;
1673static const struct devlink_region_ops ice_sram_region_ops;
1674
1675/**
1676 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
1677 * @devlink: the devlink instance
1678 * @ops: the devlink region to snapshot
1679 * @extack: extended ACK response structure
1680 * @data: on exit points to snapshot data buffer
1681 *
1682 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
1683 * the nvm-flash or shadow-ram region.
1684 *
1685 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
1686 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
1687 * interface.
1688 *
1689 * Return: zero on success, and updates the data pointer. Returns a non-zero
1690 * error code on failure.
1691 */
1692static int ice_devlink_nvm_snapshot(struct devlink *devlink,
1693 const struct devlink_region_ops *ops,
1694 struct netlink_ext_ack *extack, u8 **data)
1695{
1696 struct ice_pf *pf = devlink_priv(devlink);
1697 struct device *dev = ice_pf_to_dev(pf);
1698 struct ice_hw *hw = &pf->hw;
1699 bool read_shadow_ram;
1700 u8 *nvm_data, *tmp, i;
1701 u32 nvm_size, left;
1702 s8 num_blks;
1703 int status;
1704
1705 if (ops == &ice_nvm_region_ops) {
1706 read_shadow_ram = false;
1707 nvm_size = hw->flash.flash_size;
1708 } else if (ops == &ice_sram_region_ops) {
1709 read_shadow_ram = true;
1710 nvm_size = hw->flash.sr_words * 2u;
1711 } else {
1712 NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
1713 return -EOPNOTSUPP;
1714 }
1715
1716 nvm_data = vzalloc(nvm_size);
1717 if (!nvm_data)
1718 return -ENOMEM;
1719
1720 num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
1721 tmp = nvm_data;
1722 left = nvm_size;
1723
1724	/* Some systems take longer to read the NVM than others, which causes the
1725 * FW to reclaim the NVM lock before the entire NVM has been read. Fix
1726 * this by breaking the reads of the NVM into smaller chunks that will
1727 * probably not take as long. This has some overhead since we are
1728	 * increasing the number of AQ commands, but it should always work.
1729 */
1730 for (i = 0; i < num_blks; i++) {
1731 u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);
1732
1733 status = ice_acquire_nvm(hw, ICE_RES_READ);
1734 if (status) {
1735 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
1736 status, hw->adminq.sq_last_status);
1737 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
1738 vfree(nvm_data);
1739 return -EIO;
1740 }
1741
1742 status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
1743 &read_sz, tmp, read_shadow_ram);
1744 if (status) {
1745 dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
1746 read_sz, status, hw->adminq.sq_last_status);
1747 NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
1748 ice_release_nvm(hw);
1749 vfree(nvm_data);
1750 return -EIO;
1751 }
1752 ice_release_nvm(hw);
1753
1754 tmp += read_sz;
1755 left -= read_sz;
1756 }
1757
1758 *data = nvm_data;
1759
1760 return 0;
1761}
1762
1763/**
1764 * ice_devlink_nvm_read - Read a portion of NVM flash contents
1765 * @devlink: the devlink instance
1766 * @ops: the devlink region being read
1767 * @extack: extended ACK response structure
1768 * @offset: the offset to start at
1769 * @size: the amount to read
1770 * @data: the data buffer to read into
1771 *
1772 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
1773 * read a section of the NVM contents.
1774 *
1775 * It reads from either the nvm-flash or shadow-ram region contents.
1776 *
1777 * Return: zero on success, and updates the data pointer. Returns a non-zero
1778 * error code on failure.
1779 */
1780static int ice_devlink_nvm_read(struct devlink *devlink,
1781 const struct devlink_region_ops *ops,
1782 struct netlink_ext_ack *extack,
1783 u64 offset, u32 size, u8 *data)
1784{
1785 struct ice_pf *pf = devlink_priv(devlink);
1786 struct device *dev = ice_pf_to_dev(pf);
1787 struct ice_hw *hw = &pf->hw;
1788 bool read_shadow_ram;
1789 u64 nvm_size;
1790 int status;
1791
1792 if (ops == &ice_nvm_region_ops) {
1793 read_shadow_ram = false;
1794 nvm_size = hw->flash.flash_size;
1795 } else if (ops == &ice_sram_region_ops) {
1796 read_shadow_ram = true;
1797 nvm_size = hw->flash.sr_words * 2u;
1798 } else {
1799		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in read function");
1800 return -EOPNOTSUPP;
1801 }
1802
1803 if (offset + size >= nvm_size) {
1804 NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
1805 return -ERANGE;
1806 }
1807
1808 status = ice_acquire_nvm(hw, ICE_RES_READ);
1809 if (status) {
1810 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
1811 status, hw->adminq.sq_last_status);
1812 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
1813 return -EIO;
1814 }
1815
1816 status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
1817 read_shadow_ram);
1818 if (status) {
1819 dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
1820 size, status, hw->adminq.sq_last_status);
1821 NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
1822 ice_release_nvm(hw);
1823 return -EIO;
1824 }
1825 ice_release_nvm(hw);
1826
1827 return 0;
1828}
1829
1830/**
1831 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
1832 * @devlink: the devlink instance
1833 * @ops: the devlink region being snapshotted
1834 * @extack: extended ACK response structure
1835 * @data: on exit points to snapshot data buffer
1836 *
1837 * This function is called in response to a DEVLINK_CMD_REGION_NEW for
1838 * the device-caps devlink region. It captures a snapshot of the device
1839 * capabilities reported by firmware.
1840 *
1841 * Return: zero on success, and updates the data pointer. Returns a non-zero
1842 * error code on failure.
1843 */
1844static int
1845ice_devlink_devcaps_snapshot(struct devlink *devlink,
1846 const struct devlink_region_ops *ops,
1847 struct netlink_ext_ack *extack, u8 **data)
1848{
1849 struct ice_pf *pf = devlink_priv(devlink);
1850 struct device *dev = ice_pf_to_dev(pf);
1851 struct ice_hw *hw = &pf->hw;
1852 void *devcaps;
1853 int status;
1854
1855 devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
1856 if (!devcaps)
1857 return -ENOMEM;
1858
1859 status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
1860 ice_aqc_opc_list_dev_caps, NULL);
1861 if (status) {
1862 dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
1863 status, hw->adminq.sq_last_status);
1864 NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
1865 vfree(devcaps);
1866 return status;
1867 }
1868
1869 *data = (u8 *)devcaps;
1870
1871 return 0;
1872}
1873
1874static const struct devlink_region_ops ice_nvm_region_ops = {
1875 .name = "nvm-flash",
1876 .destructor = vfree,
1877 .snapshot = ice_devlink_nvm_snapshot,
1878 .read = ice_devlink_nvm_read,
1879};
1880
1881static const struct devlink_region_ops ice_sram_region_ops = {
1882 .name = "shadow-ram",
1883 .destructor = vfree,
1884 .snapshot = ice_devlink_nvm_snapshot,
1885 .read = ice_devlink_nvm_read,
1886};
1887
1888static const struct devlink_region_ops ice_devcaps_region_ops = {
1889 .name = "device-caps",
1890 .destructor = vfree,
1891 .snapshot = ice_devlink_devcaps_snapshot,
1892};
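
/* Example of capturing and dumping a snapshot of one of these regions from
 * user space (illustrative address and snapshot id):
 *
 *   $ devlink region new pci/0000:af:00.0/nvm-flash snapshot 1
 *   $ devlink region dump pci/0000:af:00.0/nvm-flash snapshot 1
 */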
1893
1894/**
1895 * ice_devlink_init_regions - Initialize devlink regions
1896 * @pf: the PF device structure
1897 *
1898 * Create devlink regions used to enable access to dump the contents of the
1899 * flash memory on the device.
1900 */
1901void ice_devlink_init_regions(struct ice_pf *pf)
1902{
1903 struct devlink *devlink = priv_to_devlink(pf);
1904 struct device *dev = ice_pf_to_dev(pf);
1905 u64 nvm_size, sram_size;
1906
1907 nvm_size = pf->hw.flash.flash_size;
1908 pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
1909 nvm_size);
1910 if (IS_ERR(pf->nvm_region)) {
1911 dev_err(dev, "failed to create NVM devlink region, err %ld\n",
1912 PTR_ERR(pf->nvm_region));
1913 pf->nvm_region = NULL;
1914 }
1915
1916 sram_size = pf->hw.flash.sr_words * 2u;
1917 pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
1918 1, sram_size);
1919 if (IS_ERR(pf->sram_region)) {
1920 dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
1921 PTR_ERR(pf->sram_region));
1922 pf->sram_region = NULL;
1923 }
1924
1925 pf->devcaps_region = devl_region_create(devlink,
1926 &ice_devcaps_region_ops, 10,
1927 ICE_AQ_MAX_BUF_LEN);
1928 if (IS_ERR(pf->devcaps_region)) {
1929 dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
1930 PTR_ERR(pf->devcaps_region));
1931 pf->devcaps_region = NULL;
1932 }
1933}
1934
1935/**
1936 * ice_devlink_destroy_regions - Destroy devlink regions
1937 * @pf: the PF device structure
1938 *
1939 * Remove previously created regions for this PF.
1940 */
1941void ice_devlink_destroy_regions(struct ice_pf *pf)
1942{
1943 if (pf->nvm_region)
1944 devl_region_destroy(pf->nvm_region);
1945
1946 if (pf->sram_region)
1947 devl_region_destroy(pf->sram_region);
1948
1949 if (pf->devcaps_region)
1950 devl_region_destroy(pf->devcaps_region);
1951}