1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include <net/devlink.h>
5#include "ice_sched.h"
6
7/**
8 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
9 * @pi: port information structure
10 * @info: Scheduler element information from firmware
11 *
12 * This function inserts the root node of the scheduling tree topology
13 * to the SW DB.
14 */
15static int
16ice_sched_add_root_node(struct ice_port_info *pi,
17 struct ice_aqc_txsched_elem_data *info)
18{
19 struct ice_sched_node *root;
20 struct ice_hw *hw;
21
22 if (!pi)
23 return -EINVAL;
24
25 hw = pi->hw;
26
27 root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
28 if (!root)
29 return -ENOMEM;
30
31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
32 sizeof(*root->children), GFP_KERNEL);
33 if (!root->children) {
34 devm_kfree(ice_hw_to_dev(hw), root);
35 return -ENOMEM;
36 }
37
38 memcpy(&root->info, info, sizeof(*info));
39 pi->root = root;
40 return 0;
41}
42
43/**
44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
46 * @teid: node TEID to search
47 *
48 * This function searches for a node matching the TEID in the scheduling tree
49 * from the SW DB. The search is recursive and is restricted by the number of
50 * layers it has searched through, stopping at the max supported layer.
51 *
52 * This function needs to be called when holding the port_info->sched_lock
53 */
54struct ice_sched_node *
55ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
56{
57 u16 i;
58
59 /* The TEID is the same as that of the start_node */
60 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
61 return start_node;
62
63 /* The node has no children or is at the max layer */
64 if (!start_node->num_children ||
65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
67 return NULL;
68
69 /* Check if TEID matches to any of the children nodes */
70 for (i = 0; i < start_node->num_children; i++)
71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
72 return start_node->children[i];
73
74 /* Search within each child's sub-tree */
75 for (i = 0; i < start_node->num_children; i++) {
76 struct ice_sched_node *tmp;
77
78 tmp = ice_sched_find_node_by_teid(start_node->children[i],
79 teid);
80 if (tmp)
81 return tmp;
82 }
83
84 return NULL;
85}
86
87/**
88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
89 * @hw: pointer to the HW struct
90 * @cmd_opc: cmd opcode
91 * @elems_req: number of elements to request
92 * @buf: pointer to buffer
93 * @buf_size: buffer size in bytes
94 * @elems_resp: returns total number of elements in response
95 * @cd: pointer to command details structure or NULL
96 *
97 * This function sends a scheduling elements cmd (cmd_opc)
98 */
99static int
100ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
101 u16 elems_req, void *buf, u16 buf_size,
102 u16 *elems_resp, struct ice_sq_cd *cd)
103{
104 struct ice_aqc_sched_elem_cmd *cmd;
105 struct ice_aq_desc desc;
106 int status;
107
108 cmd = &desc.params.sched_elem_cmd;
109 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
110 cmd->num_elem_req = cpu_to_le16(elems_req);
111 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 if (!status && elems_resp)
114 *elems_resp = le16_to_cpu(cmd->num_elem_resp);
115
116 return status;
117}
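
/* All scheduler element admin queue commands used in this file (add 0x0401,
 * configure 0x0403, query 0x0404, move 0x0408, suspend 0x0409, resume 0x040A,
 * delete 0x040F) are thin wrappers around ice_aqc_send_sched_elem_cmd();
 * they differ only in the opcode and in how the caller lays out @buf.
 */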
118
119/**
120 * ice_aq_query_sched_elems - query scheduler elements
121 * @hw: pointer to the HW struct
122 * @elems_req: number of elements to query
123 * @buf: pointer to buffer
124 * @buf_size: buffer size in bytes
125 * @elems_ret: returns total number of elements returned
126 * @cd: pointer to command details structure or NULL
127 *
128 * Query scheduling elements (0x0404)
129 */
130int
131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
133 u16 *elems_ret, struct ice_sq_cd *cd)
134{
135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
136 elems_req, (void *)buf, buf_size,
137 elems_ret, cd);
138}
139
140/**
141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
142 * @pi: port information structure
143 * @layer: Scheduler layer of the node
144 * @info: Scheduler element information from firmware
145 * @prealloc_node: preallocated ice_sched_node struct for SW DB
146 *
147 * This function inserts a scheduler node to the SW DB.
148 */
149int
150ice_sched_add_node(struct ice_port_info *pi, u8 layer,
151 struct ice_aqc_txsched_elem_data *info,
152 struct ice_sched_node *prealloc_node)
153{
154 struct ice_aqc_txsched_elem_data elem;
155 struct ice_sched_node *parent;
156 struct ice_sched_node *node;
157 struct ice_hw *hw;
158 int status;
159
160 if (!pi)
161 return -EINVAL;
162
163 hw = pi->hw;
164
165 /* A valid parent node should be there */
166 parent = ice_sched_find_node_by_teid(pi->root,
167 le32_to_cpu(info->parent_teid));
168 if (!parent) {
169 ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
170 le32_to_cpu(info->parent_teid));
171 return -EINVAL;
172 }
173
174 /* query the current node information from FW before adding it
175 * to the SW DB
176 */
177 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
178 if (status)
179 return status;
180
181 if (prealloc_node)
182 node = prealloc_node;
183 else
184 node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
185 if (!node)
186 return -ENOMEM;
187 if (hw->max_children[layer]) {
188 node->children = devm_kcalloc(ice_hw_to_dev(hw),
189 hw->max_children[layer],
190 sizeof(*node->children), GFP_KERNEL);
191 if (!node->children) {
192 devm_kfree(ice_hw_to_dev(hw), node);
193 return -ENOMEM;
194 }
195 }
196
197 node->in_use = true;
198 node->parent = parent;
199 node->tx_sched_layer = layer;
200 parent->children[parent->num_children++] = node;
201 node->info = elem;
202 return 0;
203}
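
/* Note: the element data cached in the SW DB above is what firmware reported
 * back via ice_sched_query_elem(), not the caller-supplied @info, so the SW
 * DB mirrors what the hardware actually programmed.
 */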
204
205/**
206 * ice_aq_delete_sched_elems - delete scheduler elements
207 * @hw: pointer to the HW struct
208 * @grps_req: number of groups to delete
209 * @buf: pointer to buffer
210 * @buf_size: buffer size in bytes
211 * @grps_del: returns total number of elements deleted
212 * @cd: pointer to command details structure or NULL
213 *
214 * Delete scheduling elements (0x040F)
215 */
216static int
217ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
218 struct ice_aqc_delete_elem *buf, u16 buf_size,
219 u16 *grps_del, struct ice_sq_cd *cd)
220{
221 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
222 grps_req, (void *)buf, buf_size,
223 grps_del, cd);
224}
225
226/**
227 * ice_sched_remove_elems - remove nodes from HW
228 * @hw: pointer to the HW struct
229 * @parent: pointer to the parent node
230 * @node_teid: node TEID to be deleted
231 *
232 * This function removes nodes from HW
233 */
234static int
235ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
236 u32 node_teid)
237{
238 DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
239 u16 buf_size = __struct_size(buf);
240 u16 num_groups_removed = 0;
241 int status;
242
243 buf->hdr.parent_teid = parent->info.node_teid;
244 buf->hdr.num_elems = cpu_to_le16(1);
245 buf->teid[0] = cpu_to_le32(node_teid);
246
247 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
248 &num_groups_removed, NULL);
249 if (status || num_groups_removed != 1)
250 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
251 hw->adminq.sq_last_status);
252
253 return status;
254}
255
256/**
257 * ice_sched_get_first_node - get the first node of the given layer
258 * @pi: port information structure
259 * @parent: pointer to the base node of the subtree
260 * @layer: layer number
261 *
262 * This function retrieves the first node of the given layer from the subtree
263 */
264static struct ice_sched_node *
265ice_sched_get_first_node(struct ice_port_info *pi,
266 struct ice_sched_node *parent, u8 layer)
267{
268 return pi->sib_head[parent->tc_num][layer];
269}
270
271/**
272 * ice_sched_get_tc_node - get pointer to TC node
273 * @pi: port information structure
274 * @tc: TC number
275 *
276 * This function returns the TC node pointer
277 */
278struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
279{
280 u8 i;
281
282 if (!pi || !pi->root)
283 return NULL;
284 for (i = 0; i < pi->root->num_children; i++)
285 if (pi->root->children[i]->tc_num == tc)
286 return pi->root->children[i];
287 return NULL;
288}
289
290/**
291 * ice_free_sched_node - Free a Tx scheduler node from SW DB
292 * @pi: port information structure
293 * @node: pointer to the ice_sched_node struct
294 *
295 * This function frees up a node from SW DB as well as from HW
296 *
297 * This function needs to be called with the port_info->sched_lock held
298 */
299void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
300{
301 struct ice_sched_node *parent;
302 struct ice_hw *hw = pi->hw;
303 u8 i, j;
304
305 /* Free the children before freeing up the parent node
306 * The parent array is updated below and that shifts the nodes
307 * in the array. So always pick the first child if num children > 0
308 */
309 while (node->num_children)
310 ice_free_sched_node(pi, node->children[0]);
311
312 /* Leaf, TC and root nodes can't be deleted by SW */
313 if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
314 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
315 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
316 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
317 u32 teid = le32_to_cpu(node->info.node_teid);
318
319 ice_sched_remove_elems(hw, node->parent, teid);
320 }
321 parent = node->parent;
322 /* root has no parent */
323 if (parent) {
324 struct ice_sched_node *p;
325
326 /* update the parent */
327 for (i = 0; i < parent->num_children; i++)
328 if (parent->children[i] == node) {
329 for (j = i + 1; j < parent->num_children; j++)
330 parent->children[j - 1] =
331 parent->children[j];
332 parent->num_children--;
333 break;
334 }
335
336 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
337 while (p) {
338 if (p->sibling == node) {
339 p->sibling = node->sibling;
340 break;
341 }
342 p = p->sibling;
343 }
344
345 /* update the sibling head if head is getting removed */
346 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
347 pi->sib_head[node->tc_num][node->tx_sched_layer] =
348 node->sibling;
349 }
350
351 devm_kfree(ice_hw_to_dev(hw), node->children);
352 kfree(node->name);
353 xa_erase(&pi->sched_node_ids, node->id);
354 devm_kfree(ice_hw_to_dev(hw), node);
355}
356
357/**
358 * ice_aq_get_dflt_topo - gets default scheduler topology
359 * @hw: pointer to the HW struct
360 * @lport: logical port number
361 * @buf: pointer to buffer
362 * @buf_size: buffer size in bytes
363 * @num_branches: returns total number of queue to port branches
364 * @cd: pointer to command details structure or NULL
365 *
366 * Get default scheduler topology (0x400)
367 */
368static int
369ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
370 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
371 u8 *num_branches, struct ice_sq_cd *cd)
372{
373 struct ice_aqc_get_topo *cmd;
374 struct ice_aq_desc desc;
375 int status;
376
377 cmd = &desc.params.get_topo;
378 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
379 cmd->port_num = lport;
380 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
381 if (!status && num_branches)
382 *num_branches = cmd->num_branches;
383
384 return status;
385}
386
387/**
388 * ice_aq_add_sched_elems - adds scheduling elements
389 * @hw: pointer to the HW struct
390 * @grps_req: the number of groups that are requested to be added
391 * @buf: pointer to buffer
392 * @buf_size: buffer size in bytes
393 * @grps_added: returns total number of groups added
394 * @cd: pointer to command details structure or NULL
395 *
396 * Add scheduling elements (0x0401)
397 */
398static int
399ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
400 struct ice_aqc_add_elem *buf, u16 buf_size,
401 u16 *grps_added, struct ice_sq_cd *cd)
402{
403 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
404 grps_req, (void *)buf, buf_size,
405 grps_added, cd);
406}
407
408/**
409 * ice_aq_cfg_sched_elems - configures scheduler elements
410 * @hw: pointer to the HW struct
411 * @elems_req: number of elements to configure
412 * @buf: pointer to buffer
413 * @buf_size: buffer size in bytes
414 * @elems_cfgd: returns total number of elements configured
415 * @cd: pointer to command details structure or NULL
416 *
417 * Configure scheduling elements (0x0403)
418 */
419static int
420ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
421 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
422 u16 *elems_cfgd, struct ice_sq_cd *cd)
423{
424 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
425 elems_req, (void *)buf, buf_size,
426 elems_cfgd, cd);
427}
428
429/**
430 * ice_aq_move_sched_elems - move scheduler element (just 1 group)
431 * @hw: pointer to the HW struct
432 * @buf: pointer to buffer
433 * @buf_size: buffer size in bytes
434 * @grps_movd: returns total number of groups moved
435 *
436 * Move scheduling elements (0x0408)
437 */
438int
439ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
440 u16 buf_size, u16 *grps_movd)
441{
442 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
443 1, buf, buf_size, grps_movd, NULL);
444}
445
446/**
447 * ice_aq_suspend_sched_elems - suspend scheduler elements
448 * @hw: pointer to the HW struct
449 * @elems_req: number of elements to suspend
450 * @buf: pointer to buffer
451 * @buf_size: buffer size in bytes
452 * @elems_ret: returns total number of elements suspended
453 * @cd: pointer to command details structure or NULL
454 *
455 * Suspend scheduling elements (0x0409)
456 */
457static int
458ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
459 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
460{
461 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
462 elems_req, (void *)buf, buf_size,
463 elems_ret, cd);
464}
465
466/**
467 * ice_aq_resume_sched_elems - resume scheduler elements
468 * @hw: pointer to the HW struct
469 * @elems_req: number of elements to resume
470 * @buf: pointer to buffer
471 * @buf_size: buffer size in bytes
472 * @elems_ret: returns total number of elements resumed
473 * @cd: pointer to command details structure or NULL
474 *
475 * Resume scheduling elements (0x040A)
476 */
477static int
478ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
479 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
480{
481 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
482 elems_req, (void *)buf, buf_size,
483 elems_ret, cd);
484}
485
486/**
487 * ice_aq_query_sched_res - query scheduler resource
488 * @hw: pointer to the HW struct
489 * @buf_size: buffer size in bytes
490 * @buf: pointer to buffer
491 * @cd: pointer to command details structure or NULL
492 *
493 * Query scheduler resource allocation (0x0412)
494 */
495static int
496ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
497 struct ice_aqc_query_txsched_res_resp *buf,
498 struct ice_sq_cd *cd)
499{
500 struct ice_aq_desc desc;
501
502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
503 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
504}
505
506/**
507 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
508 * @hw: pointer to the HW struct
509 * @num_nodes: number of nodes
510 * @node_teids: array of node teids to be suspended or resumed
511 * @suspend: true means suspend / false means resume
512 *
513 * This function suspends or resumes HW nodes
514 */
515int
516ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
517 bool suspend)
518{
519 u16 i, buf_size, num_elem_ret = 0;
520 __le32 *buf;
521 int status;
522
523 buf_size = sizeof(*buf) * num_nodes;
524 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
525 if (!buf)
526 return -ENOMEM;
527
528 for (i = 0; i < num_nodes; i++)
529 buf[i] = cpu_to_le32(node_teids[i]);
530
531 if (suspend)
532 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
533 buf_size, &num_elem_ret,
534 NULL);
535 else
536 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
537 buf_size, &num_elem_ret,
538 NULL);
539 if (status || num_elem_ret != num_nodes)
540 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
541
542 devm_kfree(ice_hw_to_dev(hw), buf);
543 return status;
544}
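
/* Example usage (see ice_sched_cfg_vsi() later in this file): to suspend a
 * single VSI node, the caller passes an array holding that node's TEID:
 *
 *	u32 teid = le32_to_cpu(vsi_node->info.node_teid);
 *
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
 */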
545
546/**
547 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
548 * @hw: pointer to the HW struct
549 * @vsi_handle: VSI handle
550 * @tc: TC number
551 * @new_numqs: number of queues
552 */
553static int
554ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
555{
556 struct ice_vsi_ctx *vsi_ctx;
557 struct ice_q_ctx *q_ctx;
558 u16 idx;
559
560 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
561 if (!vsi_ctx)
562 return -EINVAL;
563 /* allocate LAN queue contexts */
564 if (!vsi_ctx->lan_q_ctx[tc]) {
565 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
566 sizeof(*q_ctx), GFP_KERNEL);
567 if (!q_ctx)
568 return -ENOMEM;
569
570 for (idx = 0; idx < new_numqs; idx++) {
571 q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
572 q_ctx[idx].q_teid = ICE_INVAL_TEID;
573 }
574
575 vsi_ctx->lan_q_ctx[tc] = q_ctx;
576 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
577 return 0;
578 }
579 /* the number of queues is being increased, update the queue contexts */
580 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
581 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
582
583 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
584 sizeof(*q_ctx), GFP_KERNEL);
585 if (!q_ctx)
586 return -ENOMEM;
587
588 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
589 prev_num * sizeof(*q_ctx));
590 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
591
592 for (idx = prev_num; idx < new_numqs; idx++) {
593 q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
594 q_ctx[idx].q_teid = ICE_INVAL_TEID;
595 }
596
597 vsi_ctx->lan_q_ctx[tc] = q_ctx;
598 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
599 }
600 return 0;
601}
602
603/**
604 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
605 * @hw: pointer to the HW struct
606 * @vsi_handle: VSI handle
607 * @tc: TC number
608 * @new_numqs: number of queues
609 */
610static int
611ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
612{
613 struct ice_vsi_ctx *vsi_ctx;
614 struct ice_q_ctx *q_ctx;
615
616 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
617 if (!vsi_ctx)
618 return -EINVAL;
619 /* allocate RDMA queue contexts */
620 if (!vsi_ctx->rdma_q_ctx[tc]) {
621 vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
622 new_numqs,
623 sizeof(*q_ctx),
624 GFP_KERNEL);
625 if (!vsi_ctx->rdma_q_ctx[tc])
626 return -ENOMEM;
627 vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
628 return 0;
629 }
630 /* the number of queues is being increased, update the queue contexts */
631 if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
632 u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
633
634 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
635 sizeof(*q_ctx), GFP_KERNEL);
636 if (!q_ctx)
637 return -ENOMEM;
638 memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
639 prev_num * sizeof(*q_ctx));
640 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
641 vsi_ctx->rdma_q_ctx[tc] = q_ctx;
642 vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
643 }
644 return 0;
645}
646
647/**
648 * ice_aq_rl_profile - performs a rate limiting task
649 * @hw: pointer to the HW struct
650 * @opcode: opcode for add, query, or remove profile(s)
651 * @num_profiles: the number of profiles
652 * @buf: pointer to buffer
653 * @buf_size: buffer size in bytes
654 * @num_processed: number of processed add or remove profile(s) to return
655 * @cd: pointer to command details structure
656 *
657 * RL profile function to add, query, or remove profile(s)
658 */
659static int
660ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
661 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
662 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
663{
664 struct ice_aqc_rl_profile *cmd;
665 struct ice_aq_desc desc;
666 int status;
667
668 cmd = &desc.params.rl_profile;
669
670 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
671 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
672 cmd->num_profiles = cpu_to_le16(num_profiles);
673 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
674 if (!status && num_processed)
675 *num_processed = le16_to_cpu(cmd->num_processed);
676 return status;
677}
678
679/**
680 * ice_aq_add_rl_profile - adds rate limiting profile(s)
681 * @hw: pointer to the HW struct
682 * @num_profiles: the number of profile(s) to be added
683 * @buf: pointer to buffer
684 * @buf_size: buffer size in bytes
685 * @num_profiles_added: total number of profiles added to return
686 * @cd: pointer to command details structure
687 *
688 * Add RL profile (0x0410)
689 */
690static int
691ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
692 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
693 u16 *num_profiles_added, struct ice_sq_cd *cd)
694{
695 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
696 buf, buf_size, num_profiles_added, cd);
697}
698
699/**
700 * ice_aq_remove_rl_profile - removes RL profile(s)
701 * @hw: pointer to the HW struct
702 * @num_profiles: the number of profile(s) to remove
703 * @buf: pointer to buffer
704 * @buf_size: buffer size in bytes
705 * @num_profiles_removed: total number of profiles removed to return
706 * @cd: pointer to command details structure or NULL
707 *
708 * Remove RL profile (0x0415)
709 */
710static int
711ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
712 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
713 u16 *num_profiles_removed, struct ice_sq_cd *cd)
714{
715 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
716 num_profiles, buf, buf_size,
717 num_profiles_removed, cd);
718}
719
720/**
721 * ice_sched_del_rl_profile - remove RL profile
722 * @hw: pointer to the HW struct
723 * @rl_info: rate limit profile information
724 *
725 * If the profile ID is not referenced anymore, it removes the profile ID with
726 * its associated parameters from the HW DB, and locally. The caller needs to
727 * hold scheduler lock.
728 */
729static int
730ice_sched_del_rl_profile(struct ice_hw *hw,
731 struct ice_aqc_rl_profile_info *rl_info)
732{
733 struct ice_aqc_rl_profile_elem *buf;
734 u16 num_profiles_removed;
735 u16 num_profiles = 1;
736 int status;
737
738 if (rl_info->prof_id_ref != 0)
739 return -EBUSY;
740
741 /* Safe to remove profile ID */
742 buf = &rl_info->profile;
743 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
744 &num_profiles_removed, NULL);
745 if (status || num_profiles_removed != num_profiles)
746 return -EIO;
747
748 /* Delete stale entry now */
749 list_del(&rl_info->list_entry);
750 devm_kfree(ice_hw_to_dev(hw), rl_info);
751 return status;
752}
753
754/**
755 * ice_sched_clear_rl_prof - clears RL prof entries
756 * @pi: port information structure
757 *
758 * This function removes all RL profiles from HW as well as from SW DB.
759 */
760static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
761{
762 u16 ln;
763
764 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
765 struct ice_aqc_rl_profile_info *rl_prof_elem;
766 struct ice_aqc_rl_profile_info *rl_prof_tmp;
767
768 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
769 &pi->rl_prof_list[ln], list_entry) {
770 struct ice_hw *hw = pi->hw;
771 int status;
772
773 rl_prof_elem->prof_id_ref = 0;
774 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
775 if (status) {
776 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
777 /* On error, still free the memory */
778 list_del(&rl_prof_elem->list_entry);
779 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
780 }
781 }
782 }
783}
784
785/**
786 * ice_sched_clear_agg - clears the aggregator related information
787 * @hw: pointer to the hardware structure
788 *
789 * This function removes the aggregator list and frees up aggregator related
790 * memory previously allocated.
791 */
792void ice_sched_clear_agg(struct ice_hw *hw)
793{
794 struct ice_sched_agg_info *agg_info;
795 struct ice_sched_agg_info *atmp;
796
797 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
798 struct ice_sched_agg_vsi_info *agg_vsi_info;
799 struct ice_sched_agg_vsi_info *vtmp;
800
801 list_for_each_entry_safe(agg_vsi_info, vtmp,
802 &agg_info->agg_vsi_list, list_entry) {
803 list_del(&agg_vsi_info->list_entry);
804 devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
805 }
806 list_del(&agg_info->list_entry);
807 devm_kfree(ice_hw_to_dev(hw), agg_info);
808 }
809}
810
811/**
812 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
813 * @pi: port information structure
814 *
815 * This function removes all the nodes from HW as well as from SW DB.
816 */
817static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
818{
819 if (!pi)
820 return;
821 /* remove RL profiles related lists */
822 ice_sched_clear_rl_prof(pi);
823 if (pi->root) {
824 ice_free_sched_node(pi, pi->root);
825 pi->root = NULL;
826 }
827}
828
829/**
830 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
831 * @pi: port information structure
832 *
833 * Cleanup scheduling elements from SW DB
834 */
835void ice_sched_clear_port(struct ice_port_info *pi)
836{
837 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
838 return;
839
840 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
841 mutex_lock(&pi->sched_lock);
842 ice_sched_clear_tx_topo(pi);
843 mutex_unlock(&pi->sched_lock);
844 mutex_destroy(&pi->sched_lock);
845}
846
847/**
848 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
849 * @hw: pointer to the HW struct
850 *
851 * Cleanup scheduling elements from SW DB for all the ports
852 */
853void ice_sched_cleanup_all(struct ice_hw *hw)
854{
855 if (!hw)
856 return;
857
858 devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
859 hw->layer_info = NULL;
860
861 ice_sched_clear_port(hw->port_info);
862
863 hw->num_tx_sched_layers = 0;
864 hw->num_tx_sched_phys_layers = 0;
865 hw->flattened_layers = 0;
866 hw->max_cgds = 0;
867}
868
869/**
870 * ice_sched_add_elems - add nodes to HW and SW DB
871 * @pi: port information structure
872 * @tc_node: pointer to the branch node
873 * @parent: pointer to the parent node
874 * @layer: layer number to add nodes
875 * @num_nodes: number of nodes
876 * @num_nodes_added: pointer to num nodes added
877 * @first_node_teid: if new nodes are added then return the TEID of first node
878 * @prealloc_nodes: preallocated nodes struct for software DB
879 *
880 * This function adds nodes to HW as well as to SW DB for a given layer
881 */
882int
883ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
884 struct ice_sched_node *parent, u8 layer, u16 num_nodes,
885 u16 *num_nodes_added, u32 *first_node_teid,
886 struct ice_sched_node **prealloc_nodes)
887{
888 struct ice_sched_node *prev, *new_node;
889 struct ice_aqc_add_elem *buf;
890 u16 i, num_groups_added = 0;
891 struct ice_hw *hw = pi->hw;
892 size_t buf_size;
893 int status = 0;
894 u32 teid;
895
896 buf_size = struct_size(buf, generic, num_nodes);
897 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
898 if (!buf)
899 return -ENOMEM;
900
901 buf->hdr.parent_teid = parent->info.node_teid;
902 buf->hdr.num_elems = cpu_to_le16(num_nodes);
903 for (i = 0; i < num_nodes; i++) {
904 buf->generic[i].parent_teid = parent->info.node_teid;
905 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
906 buf->generic[i].data.valid_sections =
907 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
908 ICE_AQC_ELEM_VALID_EIR;
909 buf->generic[i].data.generic = 0;
910 buf->generic[i].data.cir_bw.bw_profile_idx =
911 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
912 buf->generic[i].data.cir_bw.bw_alloc =
913 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
914 buf->generic[i].data.eir_bw.bw_profile_idx =
915 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
916 buf->generic[i].data.eir_bw.bw_alloc =
917 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
918 }
919
920 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
921 &num_groups_added, NULL);
922 if (status || num_groups_added != 1) {
923 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
924 hw->adminq.sq_last_status);
925 devm_kfree(ice_hw_to_dev(hw), buf);
926 return -EIO;
927 }
928
929 *num_nodes_added = num_nodes;
930 /* add nodes to the SW DB */
931 for (i = 0; i < num_nodes; i++) {
932 if (prealloc_nodes)
933 status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
934 else
935 status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
936
937 if (status) {
938 ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
939 status);
940 break;
941 }
942
943 teid = le32_to_cpu(buf->generic[i].node_teid);
944 new_node = ice_sched_find_node_by_teid(parent, teid);
945 if (!new_node) {
946 ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
947 break;
948 }
949
950 new_node->sibling = NULL;
951 new_node->tc_num = tc_node->tc_num;
952 new_node->tx_weight = ICE_SCHED_DFLT_BW_WT;
953 new_node->tx_share = ICE_SCHED_DFLT_BW;
954 new_node->tx_max = ICE_SCHED_DFLT_BW;
955 new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL);
956 if (!new_node->name)
957 return -ENOMEM;
958
959 status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX),
960 GFP_KERNEL);
961 if (status) {
962 ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n",
963 status);
964 break;
965 }
966
967 snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id);
968
969 /* add it to previous node sibling pointer */
970 /* Note: siblings are not linked across branches */
971 prev = ice_sched_get_first_node(pi, tc_node, layer);
972 if (prev && prev != new_node) {
973 while (prev->sibling)
974 prev = prev->sibling;
975 prev->sibling = new_node;
976 }
977
978 /* initialize the sibling head */
979 if (!pi->sib_head[tc_node->tc_num][layer])
980 pi->sib_head[tc_node->tc_num][layer] = new_node;
981
982 if (i == 0)
983 *first_node_teid = teid;
984 }
985
986 devm_kfree(ice_hw_to_dev(hw), buf);
987 return status;
988}
989
990/**
991 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
992 * @pi: port information structure
993 * @tc_node: pointer to TC node
994 * @parent: pointer to parent node
995 * @layer: layer number to add nodes
996 * @num_nodes: number of nodes to be added
997 * @first_node_teid: pointer to the first node TEID
998 * @num_nodes_added: pointer to number of nodes added
999 *
1000 * Add nodes into specific HW layer.
1001 */
1002static int
1003ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
1004 struct ice_sched_node *tc_node,
1005 struct ice_sched_node *parent, u8 layer,
1006 u16 num_nodes, u32 *first_node_teid,
1007 u16 *num_nodes_added)
1008{
1009 u16 max_child_nodes;
1010
1011 *num_nodes_added = 0;
1012
1013 if (!num_nodes)
1014 return 0;
1015
1016 if (!parent || layer < pi->hw->sw_entry_point_layer)
1017 return -EINVAL;
1018
1019 /* max children per node per layer */
1020 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
1021
1022 /* current number of children + required nodes exceed max children */
1023 if ((parent->num_children + num_nodes) > max_child_nodes) {
1024 /* Fail if the parent is a TC node */
1025 if (parent == tc_node)
1026 return -EIO;
1027 return -ENOSPC;
1028 }
1029
1030 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
1031 num_nodes_added, first_node_teid, NULL);
1032}
1033
1034/**
1035 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
1036 * @pi: port information structure
1037 * @tc_node: pointer to TC node
1038 * @parent: pointer to parent node
1039 * @layer: layer number to add nodes
1040 * @num_nodes: number of nodes to be added
1041 * @first_node_teid: pointer to the first node TEID
1042 * @num_nodes_added: pointer to number of nodes added
1043 *
1044 * This function adds nodes to a given layer.
1045 */
1046int
1047ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
1048 struct ice_sched_node *tc_node,
1049 struct ice_sched_node *parent, u8 layer,
1050 u16 num_nodes, u32 *first_node_teid,
1051 u16 *num_nodes_added)
1052{
1053 u32 *first_teid_ptr = first_node_teid;
1054 u16 new_num_nodes = num_nodes;
1055 int status = 0;
1056
1057 *num_nodes_added = 0;
1058 while (*num_nodes_added < num_nodes) {
1059 u16 max_child_nodes, num_added = 0;
1060 u32 temp;
1061
1062 status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
1063 layer, new_num_nodes,
1064 first_teid_ptr,
1065 &num_added);
1066 if (!status)
1067 *num_nodes_added += num_added;
1068 /* added more nodes than requested ? */
1069 if (*num_nodes_added > num_nodes) {
1070 ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
1071 *num_nodes_added);
1072 status = -EIO;
1073 break;
1074 }
1075 /* break if all the nodes are added successfully */
1076 if (!status && (*num_nodes_added == num_nodes))
1077 break;
1078 /* break if the error is not max limit */
1079 if (status && status != -ENOSPC)
1080 break;
1081 /* Exceeded the max children */
1082 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
1083 /* utilize all the spaces if the parent is not full */
1084 if (parent->num_children < max_child_nodes) {
1085 new_num_nodes = max_child_nodes - parent->num_children;
1086 } else {
1087 /* This parent is full, try the next sibling */
1088 parent = parent->sibling;
1089 /* Don't modify the first node TEID memory if the
1090 * first node was added already in the above call.
1091 * Instead send some temp memory for all other
1092 * recursive calls.
1093 */
1094 if (num_added)
1095 first_teid_ptr = &temp;
1096
1097 new_num_nodes = num_nodes - *num_nodes_added;
1098 }
1099 }
1100 return status;
1101}
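
/* In short: the loop above first asks the current parent for all remaining
 * nodes; on -ENOSPC it retries with either the parent's remaining free slots
 * or the parent's next sibling, and *first_node_teid keeps the TEID of the
 * very first node that was added.
 */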
1102
1103/**
1104 * ice_sched_get_qgrp_layer - get the current queue group layer number
1105 * @hw: pointer to the HW struct
1106 *
1107 * This function returns the current queue group layer number
1108 */
1109static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1110{
1111 /* The qgroup layer is one above the leaf layer; layer indices are 0-relative, hence the -2 */
1112 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1113}
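
/* Worked example: in a 9 layer tree the leaf (queue) nodes sit at layer
 * index 8, so the queue group layer is index 7, i.e. num_tx_sched_layers (9)
 * minus ICE_QGRP_LAYER_OFFSET (assumed to be 2, per the "-2" note above).
 */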
1114
1115/**
1116 * ice_sched_get_vsi_layer - get the current VSI layer number
1117 * @hw: pointer to the HW struct
1118 *
1119 * This function returns the current VSI layer number
1120 */
1121u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1122{
1123 /* Num Layers VSI layer
1124 * 9 6
1125 * 7 4
1126 * 5 or less sw_entry_point_layer
1127 */
1128 /* calculate the VSI layer based on number of layers. */
1129 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
1130 return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1131 else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
1132 /* qgroup and VSI layers are same */
1133 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1134 return hw->sw_entry_point_layer;
1135}
1136
1137/**
1138 * ice_sched_get_agg_layer - get the current aggregator layer number
1139 * @hw: pointer to the HW struct
1140 *
1141 * This function returns the current aggregator layer number
1142 */
1143u8 ice_sched_get_agg_layer(struct ice_hw *hw)
1144{
1145 /* Num Layers aggregator layer
1146 * 9 4
1147 * 7 or less sw_entry_point_layer
1148 */
1149 /* calculate the aggregator layer based on number of layers. */
1150 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
1151 return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
1152 else
1153 return hw->sw_entry_point_layer;
1154}
1155
1156/**
1157 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1158 * @pi: port information structure
1159 *
1160 * This function removes the leaf node that was created by the FW
1161 * during initialization
1162 */
1163static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1164{
1165 struct ice_sched_node *node;
1166
1167 node = pi->root;
1168 while (node) {
1169 if (!node->num_children)
1170 break;
1171 node = node->children[0];
1172 }
1173 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1174 u32 teid = le32_to_cpu(node->info.node_teid);
1175 int status;
1176
1177 /* remove the default leaf node */
1178 status = ice_sched_remove_elems(pi->hw, node->parent, teid);
1179 if (!status)
1180 ice_free_sched_node(pi, node);
1181 }
1182}
1183
1184/**
1185 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1186 * @pi: port information structure
1187 *
1188 * This function frees all the nodes except root and TC that were created by
1189 * the FW during initialization
1190 */
1191static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1192{
1193 struct ice_sched_node *node;
1194
1195 ice_rm_dflt_leaf_node(pi);
1196
1197 /* remove the default nodes except TC and root nodes */
1198 node = pi->root;
1199 while (node) {
1200 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1201 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1202 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1203 ice_free_sched_node(pi, node);
1204 break;
1205 }
1206
1207 if (!node->num_children)
1208 break;
1209 node = node->children[0];
1210 }
1211}
1212
1213/**
1214 * ice_sched_init_port - Initialize scheduler by querying information from FW
1215 * @pi: port info structure for the tree to cleanup
1216 *
1217 * This function is the initial call to find the total number of Tx scheduler
1218 * resources and the default topology created by firmware, and to store the
1219 * information in the SW DB.
1220 */
1221int ice_sched_init_port(struct ice_port_info *pi)
1222{
1223 struct ice_aqc_get_topo_elem *buf;
1224 struct ice_hw *hw;
1225 u8 num_branches;
1226 u16 num_elems;
1227 int status;
1228 u8 i, j;
1229
1230 if (!pi)
1231 return -EINVAL;
1232 hw = pi->hw;
1233
1234 /* Query the Default Topology from FW */
1235 buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
1236 if (!buf)
1237 return -ENOMEM;
1238
1239 /* Query default scheduling tree topology */
1240 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1241 &num_branches, NULL);
1242 if (status)
1243 goto err_init_port;
1244
1245 /* num_branches should be between 1-8 */
1246 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1247 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1248 num_branches);
1249 status = -EINVAL;
1250 goto err_init_port;
1251 }
1252
1253 /* get the number of elements on the default/first branch */
1254 num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1255
1256 /* num_elems should always be between 1-9 */
1257 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1258 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1259 num_elems);
1260 status = -EINVAL;
1261 goto err_init_port;
1262 }
1263
1264 /* If the last node is a leaf node then the index of the queue group
1265 * layer is two less than the number of elements.
1266 */
1267 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1268 ICE_AQC_ELEM_TYPE_LEAF)
1269 pi->last_node_teid =
1270 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1271 else
1272 pi->last_node_teid =
1273 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1274
1275 /* Insert the Tx Sched root node */
1276 status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1277 if (status)
1278 goto err_init_port;
1279
1280 /* Parse the default tree and cache the information */
1281 for (i = 0; i < num_branches; i++) {
1282 num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1283
1284 /* Skip root element as already inserted */
1285 for (j = 1; j < num_elems; j++) {
1286 /* update the sw entry point */
1287 if (buf[0].generic[j].data.elem_type ==
1288 ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1289 hw->sw_entry_point_layer = j;
1290
1291 status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
1292 if (status)
1293 goto err_init_port;
1294 }
1295 }
1296
1297 /* Remove the default nodes. */
1298 if (pi->root)
1299 ice_sched_rm_dflt_nodes(pi);
1300
1301 /* initialize the port for handling the scheduler tree */
1302 pi->port_state = ICE_SCHED_PORT_STATE_READY;
1303 mutex_init(&pi->sched_lock);
1304 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1305 INIT_LIST_HEAD(&pi->rl_prof_list[i]);
1306
1307err_init_port:
1308 if (status && pi->root) {
1309 ice_free_sched_node(pi, pi->root);
1310 pi->root = NULL;
1311 }
1312
1313 kfree(buf);
1314 return status;
1315}
1316
1317/**
1318 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1319 * @hw: pointer to the HW struct
1320 *
1321 * query FW for allocated scheduler resources and store in HW struct
1322 */
1323int ice_sched_query_res_alloc(struct ice_hw *hw)
1324{
1325 struct ice_aqc_query_txsched_res_resp *buf;
1326 __le16 max_sibl;
1327 int status = 0;
1328 u16 i;
1329
1330 if (hw->layer_info)
1331 return status;
1332
1333 buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1334 if (!buf)
1335 return -ENOMEM;
1336
1337 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1338 if (status)
1339 goto sched_query_out;
1340
1341 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1342 hw->num_tx_sched_phys_layers =
1343 le16_to_cpu(buf->sched_props.phys_levels);
1344 hw->flattened_layers = buf->sched_props.flattening_bitmap;
1345 hw->max_cgds = buf->sched_props.max_pf_cgds;
1346
1347 /* The max sibling group size of a layer gives the max children of a node
1348 * at the layer above it, i.e.:
1349 * layer 1 node max children will be layer 2 max sibling group size
1350 * layer 2 node max children will be layer 3 max sibling group size
1351 * and so on. This array will be populated from root (index 0) to
1352 * qgroup layer 7. Leaf node has no children.
1353 */
1354 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1355 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1356 hw->max_children[i] = le16_to_cpu(max_sibl);
1357 }
1358
1359 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1360 (hw->num_tx_sched_layers *
1361 sizeof(*hw->layer_info)),
1362 GFP_KERNEL);
1363 if (!hw->layer_info) {
1364 status = -ENOMEM;
1365 goto sched_query_out;
1366 }
1367
1368sched_query_out:
1369 devm_kfree(ice_hw_to_dev(hw), buf);
1370 return status;
1371}
1372
1373/**
1374 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
1375 * @hw: pointer to the HW struct
1376 *
1377 * Determine the PSM clock frequency and store in HW struct
1378 */
1379void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
1380{
1381 u32 val, clk_src;
1382
1383 val = rd32(hw, GLGEN_CLKSTAT_SRC);
1384 clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val);
1385
1386#define PSM_CLK_SRC_367_MHZ 0x0
1387#define PSM_CLK_SRC_416_MHZ 0x1
1388#define PSM_CLK_SRC_446_MHZ 0x2
1389#define PSM_CLK_SRC_390_MHZ 0x3
1390
1391 switch (clk_src) {
1392 case PSM_CLK_SRC_367_MHZ:
1393 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
1394 break;
1395 case PSM_CLK_SRC_416_MHZ:
1396 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
1397 break;
1398 case PSM_CLK_SRC_446_MHZ:
1399 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1400 break;
1401 case PSM_CLK_SRC_390_MHZ:
1402 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
1403 break;
1404 default:
1405 ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
1406 clk_src);
1407 /* fall back to a safe default */
1408 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1409 }
1410}
1411
1412/**
1413 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1414 * @hw: pointer to the HW struct
1415 * @base: pointer to the base node
1416 * @node: pointer to the node to search
1417 *
1418 * This function checks whether a given node is part of the base node
1419 * subtree or not
1420 */
1421static bool
1422ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1423 struct ice_sched_node *node)
1424{
1425 u8 i;
1426
1427 for (i = 0; i < base->num_children; i++) {
1428 struct ice_sched_node *child = base->children[i];
1429
1430 if (node == child)
1431 return true;
1432
1433 if (child->tx_sched_layer > node->tx_sched_layer)
1434 return false;
1435
1436 /* this recursion is intentional, and won't
1437 * go deeper than 8 calls
1438 */
1439 if (ice_sched_find_node_in_subtree(hw, child, node))
1440 return true;
1441 }
1442 return false;
1443}
1444
1445/**
1446 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
1447 * @pi: port information structure
1448 * @vsi_node: pointer to the VSI node
1449 * @qgrp_node: first queue group node identified for scanning
1450 * @owner: LAN or RDMA
1451 *
1452 * This function retrieves a free LAN or RDMA queue group node by scanning
1453 * qgrp_node and its siblings for the queue group with the fewest number
1454 * of queues currently assigned.
1455 */
1456static struct ice_sched_node *
1457ice_sched_get_free_qgrp(struct ice_port_info *pi,
1458 struct ice_sched_node *vsi_node,
1459 struct ice_sched_node *qgrp_node, u8 owner)
1460{
1461 struct ice_sched_node *min_qgrp;
1462 u8 min_children;
1463
1464 if (!qgrp_node)
1465 return qgrp_node;
1466 min_children = qgrp_node->num_children;
1467 if (!min_children)
1468 return qgrp_node;
1469 min_qgrp = qgrp_node;
1470 /* scan all queue groups until we find a node which has fewer than the
1471 * current minimum number of children. This way all queue group nodes get
1472 * an equal number of shares and stay active. The bandwidth will be equally
1473 * distributed across all queues.
1474 */
1475 while (qgrp_node) {
1476 /* make sure the qgroup node is part of the VSI subtree */
1477 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1478 if (qgrp_node->num_children < min_children &&
1479 qgrp_node->owner == owner) {
1480 /* replace the new min queue group node */
1481 min_qgrp = qgrp_node;
1482 min_children = min_qgrp->num_children;
1483 /* break if it has no children */
1484 if (!min_children)
1485 break;
1486 }
1487 qgrp_node = qgrp_node->sibling;
1488 }
1489 return min_qgrp;
1490}
1491
1492/**
1493 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1494 * @pi: port information structure
1495 * @vsi_handle: software VSI handle
1496 * @tc: branch number
1497 * @owner: LAN or RDMA
1498 *
1499 * This function retrieves a free LAN or RDMA queue group node
1500 */
1501struct ice_sched_node *
1502ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1503 u8 owner)
1504{
1505 struct ice_sched_node *vsi_node, *qgrp_node;
1506 struct ice_vsi_ctx *vsi_ctx;
1507 u8 qgrp_layer, vsi_layer;
1508 u16 max_children;
1509
1510 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1511 vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1512 max_children = pi->hw->max_children[qgrp_layer];
1513
1514 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1515 if (!vsi_ctx)
1516 return NULL;
1517 vsi_node = vsi_ctx->sched.vsi_node[tc];
1518 /* bail out if the VSI node doesn't exist (invalid VSI ID) */
1519 if (!vsi_node)
1520 return NULL;
1521
1522 /* If the queue group and VSI layers are the same then queues
1523 * are all attached directly to the VSI */
1524 */
1525 if (qgrp_layer == vsi_layer)
1526 return vsi_node;
1527
1528 /* get the first queue group node from VSI sub-tree */
1529 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1530 while (qgrp_node) {
1531 /* make sure the qgroup node is part of the VSI subtree */
1532 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1533 if (qgrp_node->num_children < max_children &&
1534 qgrp_node->owner == owner)
1535 break;
1536 qgrp_node = qgrp_node->sibling;
1537 }
1538
1539 /* Select the best queue group */
1540 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
1541}
1542
1543/**
1544 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1545 * @pi: pointer to the port information structure
1546 * @tc_node: pointer to the TC node
1547 * @vsi_handle: software VSI handle
1548 *
1549 * This function retrieves a VSI node for a given VSI ID from a given
1550 * TC branch
1551 */
1552static struct ice_sched_node *
1553ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1554 u16 vsi_handle)
1555{
1556 struct ice_sched_node *node;
1557 u8 vsi_layer;
1558
1559 vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1560 node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
1561
1562 /* Check whether it already exists */
1563 while (node) {
1564 if (node->vsi_handle == vsi_handle)
1565 return node;
1566 node = node->sibling;
1567 }
1568
1569 return node;
1570}
1571
1572/**
1573 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
1574 * @pi: pointer to the port information structure
1575 * @tc_node: pointer to the TC node
1576 * @agg_id: aggregator ID
1577 *
1578 * This function retrieves an aggregator node for a given aggregator ID from
1579 * a given TC branch
1580 */
1581struct ice_sched_node *
1582ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1583 u32 agg_id)
1584{
1585 struct ice_sched_node *node;
1586 struct ice_hw *hw = pi->hw;
1587 u8 agg_layer;
1588
1589 if (!hw)
1590 return NULL;
1591 agg_layer = ice_sched_get_agg_layer(hw);
1592 node = ice_sched_get_first_node(pi, tc_node, agg_layer);
1593
1594 /* Check whether it already exists */
1595 while (node) {
1596 if (node->agg_id == agg_id)
1597 return node;
1598 node = node->sibling;
1599 }
1600
1601 return node;
1602}
1603
1604/**
1605 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1606 * @hw: pointer to the HW struct
1607 * @num_qs: number of queues
1608 * @num_nodes: num nodes array
1609 *
1610 * This function calculates the number of VSI child nodes based on the
1611 * number of queues.
1612 */
1613static void
1614ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1615{
1616 u16 num = num_qs;
1617 u8 i, qgl, vsil;
1618
1619 qgl = ice_sched_get_qgrp_layer(hw);
1620 vsil = ice_sched_get_vsi_layer(hw);
1621
1622 /* calculate num nodes from queue group to VSI layer */
1623 for (i = qgl; i > vsil; i--) {
1624 /* round to the next integer if there is a remainder */
1625 num = DIV_ROUND_UP(num, hw->max_children[i]);
1626
1627 /* need at least one node */
1628 num_nodes[i] = num ? num : 1;
1629 }
1630}
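
/* Worked example with illustrative numbers: for a 9 layer tree (queue group
 * layer 7, VSI layer 6) and an assumed max_children[7] of 8, a request for
 * 128 queues yields num_nodes[7] = DIV_ROUND_UP(128, 8) = 16 queue group
 * nodes under the VSI node. The real max_children[] values come from
 * ice_sched_query_res_alloc().
 */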
1631
1632/**
1633 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1634 * @pi: port information structure
1635 * @vsi_handle: software VSI handle
1636 * @tc_node: pointer to the TC node
1637 * @num_nodes: pointer to the num nodes that needs to be added per layer
1638 * @owner: node owner (LAN or RDMA)
1639 *
1640 * This function adds the VSI child nodes to the tree. It gets called for
1641 * LAN and RDMA separately.
1642 */
1643static int
1644ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1645 struct ice_sched_node *tc_node, u16 *num_nodes,
1646 u8 owner)
1647{
1648 struct ice_sched_node *parent, *node;
1649 struct ice_hw *hw = pi->hw;
1650 u32 first_node_teid;
1651 u16 num_added = 0;
1652 u8 i, qgl, vsil;
1653
1654 qgl = ice_sched_get_qgrp_layer(hw);
1655 vsil = ice_sched_get_vsi_layer(hw);
1656 parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1657 for (i = vsil + 1; i <= qgl; i++) {
1658 int status;
1659
1660 if (!parent)
1661 return -EIO;
1662
1663 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1664 num_nodes[i],
1665 &first_node_teid,
1666 &num_added);
1667 if (status || num_nodes[i] != num_added)
1668 return -EIO;
1669
1670 /* The newly added node can be a new parent for the next
1671 * layer nodes
1672 */
1673 if (num_added) {
1674 parent = ice_sched_find_node_by_teid(tc_node,
1675 first_node_teid);
1676 node = parent;
1677 while (node) {
1678 node->owner = owner;
1679 node = node->sibling;
1680 }
1681 } else {
1682 parent = parent->children[0];
1683 }
1684 }
1685
1686 return 0;
1687}
1688
1689/**
1690 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1691 * @pi: pointer to the port info structure
1692 * @tc_node: pointer to TC node
1693 * @num_nodes: pointer to num nodes array
1694 *
1695 * This function calculates the number of support nodes needed to add this
1696 * VSI into the Tx tree, including the VSI, its parent and the intermediate
1697 * nodes in the layers below
1698 */
1699static void
1700ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
1701 struct ice_sched_node *tc_node, u16 *num_nodes)
1702{
1703 struct ice_sched_node *node;
1704 u8 vsil;
1705 int i;
1706
1707 vsil = ice_sched_get_vsi_layer(pi->hw);
1708 for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
1709 /* Add a node at this layer if the TC has no children; we
1710 * always need at least one node at the VSI layer
1711 */
1712 if (!tc_node->num_children || i == vsil) {
1713 num_nodes[i]++;
1714 } else {
1715 /* If the intermediate nodes have reached max children
1716 * then add a new one.
1717 */
1718 node = ice_sched_get_first_node(pi, tc_node, (u8)i);
1719 /* scan all the siblings */
1720 while (node) {
1721 if (node->num_children < pi->hw->max_children[i])
1722 break;
1723 node = node->sibling;
1724 }
1725
1726 /* The tree has an intermediate node with room for this new
1727 * VSI, so there is no need to calculate support nodes for the
1728 * layers below.
1729 */
1730 if (node)
1731 break;
1732 /* all the nodes are full, allocate a new one */
1733 num_nodes[i]++;
1734 }
1735}
1736
1737/**
1738 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1739 * @pi: port information structure
1740 * @vsi_handle: software VSI handle
1741 * @tc_node: pointer to TC node
1742 * @num_nodes: pointer to num nodes array
1743 *
1744 * This function adds the VSI support nodes into the Tx tree, including the
1745 * VSI, its parent and the intermediate nodes in the layers below
1746 */
1747static int
1748ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1749 struct ice_sched_node *tc_node, u16 *num_nodes)
1750{
1751 struct ice_sched_node *parent = tc_node;
1752 u32 first_node_teid;
1753 u16 num_added = 0;
1754 u8 i, vsil;
1755
1756 if (!pi)
1757 return -EINVAL;
1758
1759 vsil = ice_sched_get_vsi_layer(pi->hw);
1760 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1761 int status;
1762
1763 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1764 i, num_nodes[i],
1765 &first_node_teid,
1766 &num_added);
1767 if (status || num_nodes[i] != num_added)
1768 return -EIO;
1769
1770 /* The newly added node can be a new parent for the next
1771 * layer nodes
1772 */
1773 if (num_added)
1774 parent = ice_sched_find_node_by_teid(tc_node,
1775 first_node_teid);
1776 else
1777 parent = parent->children[0];
1778
1779 if (!parent)
1780 return -EIO;
1781
1782 if (i == vsil)
1783 parent->vsi_handle = vsi_handle;
1784 }
1785
1786 return 0;
1787}
1788
1789/**
1790 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1791 * @pi: port information structure
1792 * @vsi_handle: software VSI handle
1793 * @tc: TC number
1794 *
1795 * This function adds a new VSI into scheduler tree
1796 */
1797static int
1798ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1799{
1800 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1801 struct ice_sched_node *tc_node;
1802
1803 tc_node = ice_sched_get_tc_node(pi, tc);
1804 if (!tc_node)
1805 return -EINVAL;
1806
1807 /* calculate number of supported nodes needed for this VSI */
1808 ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
1809
1810 /* add VSI supported nodes to TC subtree */
1811 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1812 num_nodes);
1813}
1814
1815/**
1816 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1817 * @pi: port information structure
1818 * @vsi_handle: software VSI handle
1819 * @tc: TC number
1820 * @new_numqs: new number of max queues
1821 * @owner: owner of this subtree
1822 *
1823 * This function updates the VSI child nodes based on the number of queues
1824 */
1825static int
1826ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1827 u8 tc, u16 new_numqs, u8 owner)
1828{
1829 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1830 struct ice_sched_node *vsi_node;
1831 struct ice_sched_node *tc_node;
1832 struct ice_vsi_ctx *vsi_ctx;
1833 struct ice_hw *hw = pi->hw;
1834 u16 prev_numqs;
1835 int status = 0;
1836
1837 tc_node = ice_sched_get_tc_node(pi, tc);
1838 if (!tc_node)
1839 return -EIO;
1840
1841 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1842 if (!vsi_node)
1843 return -EIO;
1844
1845 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1846 if (!vsi_ctx)
1847 return -EINVAL;
1848
1849 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1850 prev_numqs = vsi_ctx->sched.max_lanq[tc];
1851 else
1852 prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
1853	/* number of queues is unchanged or less than the previous number */
1854 if (new_numqs <= prev_numqs)
1855 return status;
1856 if (owner == ICE_SCHED_NODE_OWNER_LAN) {
1857 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1858 if (status)
1859 return status;
1860 } else {
1861 status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
1862 if (status)
1863 return status;
1864 }
1865
1866 if (new_numqs)
1867 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1868	/* Always keep the maximum queue configuration. Update the tree only if
1869	 * the number of queues is greater than the previous number of queues.
1870	 * This may leave some extra nodes in the tree if the number of queues
1871	 * is less than the previous number, but that wouldn't harm anything.
1872	 * Removing those extra nodes may complicate the code if they are part
1873	 * of an SRL or are individually rate limited.
1874	 */
1875 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1876 new_num_nodes, owner);
1877 if (status)
1878 return status;
1879 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1880 vsi_ctx->sched.max_lanq[tc] = new_numqs;
1881 else
1882 vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
1883
1884 return 0;
1885}
1886
1887/**
1888 * ice_sched_cfg_vsi - configure the new/existing VSI
1889 * @pi: port information structure
1890 * @vsi_handle: software VSI handle
1891 * @tc: TC number
1892 * @maxqs: max number of queues
1893 * @owner: LAN or RDMA
1894 * @enable: TC enabled or disabled
1895 *
1896 * This function adds/updates VSI nodes based on the number of queues. If TC is
1897 * enabled and the VSI is in a suspended state then resume the VSI. If TC is
1898 * disabled then suspend the VSI if it is not already suspended.
1899 */
1900int
1901ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1902 u8 owner, bool enable)
1903{
1904 struct ice_sched_node *vsi_node, *tc_node;
1905 struct ice_vsi_ctx *vsi_ctx;
1906 struct ice_hw *hw = pi->hw;
1907 int status = 0;
1908
1909 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1910 tc_node = ice_sched_get_tc_node(pi, tc);
1911 if (!tc_node)
1912 return -EINVAL;
1913 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1914 if (!vsi_ctx)
1915 return -EINVAL;
1916 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1917
1918 /* suspend the VSI if TC is not enabled */
1919 if (!enable) {
1920 if (vsi_node && vsi_node->in_use) {
1921 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1922
1923 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1924 true);
1925 if (!status)
1926 vsi_node->in_use = false;
1927 }
1928 return status;
1929 }
1930
1931 /* TC is enabled, if it is a new VSI then add it to the tree */
1932 if (!vsi_node) {
1933 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1934 if (status)
1935 return status;
1936
1937 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1938 if (!vsi_node)
1939 return -EIO;
1940
1941 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1942 vsi_node->in_use = true;
1943		/* invalidate the max queues whenever the VSI is added to the
1944		 * scheduler tree for the first time (boot or after reset). The
1945		 * child nodes must be recreated in these cases.
1946		 */
1947 vsi_ctx->sched.max_lanq[tc] = 0;
1948 vsi_ctx->sched.max_rdmaq[tc] = 0;
1949 }
1950
1951 /* update the VSI child nodes */
1952 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1953 owner);
1954 if (status)
1955 return status;
1956
1957 /* TC is enabled, resume the VSI if it is in the suspend state */
1958 if (!vsi_node->in_use) {
1959 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1960
1961 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1962 if (!status)
1963 vsi_node->in_use = true;
1964 }
1965
1966 return status;
1967}
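
/* Illustrative usage sketch (not part of the driver): callers such as the
 * queue configuration path are expected to take pi->sched_lock around this
 * call; vsi_handle, tc and num_txq below are hypothetical values owned by
 * that caller.
 *
 *	mutex_lock(&pi->sched_lock);
 *	status = ice_sched_cfg_vsi(pi, vsi_handle, tc, num_txq,
 *				   ICE_SCHED_NODE_OWNER_LAN, true);
 *	mutex_unlock(&pi->sched_lock);
 */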
1968
1969/**
1970 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
1971 * @pi: port information structure
1972 * @vsi_handle: software VSI handle
1973 *
1974 * This function removes single aggregator VSI info entry from
1975 * aggregator list.
1976 */
1977static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1978{
1979 struct ice_sched_agg_info *agg_info;
1980 struct ice_sched_agg_info *atmp;
1981
1982 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1983 list_entry) {
1984 struct ice_sched_agg_vsi_info *agg_vsi_info;
1985 struct ice_sched_agg_vsi_info *vtmp;
1986
1987 list_for_each_entry_safe(agg_vsi_info, vtmp,
1988 &agg_info->agg_vsi_list, list_entry)
1989 if (agg_vsi_info->vsi_handle == vsi_handle) {
1990 list_del(&agg_vsi_info->list_entry);
1991 devm_kfree(ice_hw_to_dev(pi->hw),
1992 agg_vsi_info);
1993 return;
1994 }
1995 }
1996}
1997
1998/**
1999 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
2000 * @node: pointer to the sub-tree node
2001 *
2002 * This function checks for a leaf node presence in a given sub-tree node.
2003 */
2004static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
2005{
2006 u8 i;
2007
2008 for (i = 0; i < node->num_children; i++)
2009 if (ice_sched_is_leaf_node_present(node->children[i]))
2010 return true;
2011 /* check for a leaf node */
2012 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
2013}
2014
2015/**
2016 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
2017 * @pi: port information structure
2018 * @vsi_handle: software VSI handle
2019 * @owner: LAN or RDMA
2020 *
2021 * This function removes the VSI and its LAN or RDMA children nodes from the
2022 * scheduler tree.
2023 */
2024static int
2025ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
2026{
2027 struct ice_vsi_ctx *vsi_ctx;
2028 int status = -EINVAL;
2029 u8 i;
2030
2031 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
2032 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2033 return status;
2034 mutex_lock(&pi->sched_lock);
2035 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
2036 if (!vsi_ctx)
2037 goto exit_sched_rm_vsi_cfg;
2038
2039 ice_for_each_traffic_class(i) {
2040 struct ice_sched_node *vsi_node, *tc_node;
2041 u8 j = 0;
2042
2043 tc_node = ice_sched_get_tc_node(pi, i);
2044 if (!tc_node)
2045 continue;
2046
2047 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2048 if (!vsi_node)
2049 continue;
2050
2051 if (ice_sched_is_leaf_node_present(vsi_node)) {
2052 ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
2053 status = -EBUSY;
2054 goto exit_sched_rm_vsi_cfg;
2055 }
2056 while (j < vsi_node->num_children) {
2057 if (vsi_node->children[j]->owner == owner) {
2058 ice_free_sched_node(pi, vsi_node->children[j]);
2059
2060 /* reset the counter again since the num
2061 * children will be updated after node removal
2062 */
2063 j = 0;
2064 } else {
2065 j++;
2066 }
2067 }
2068 /* remove the VSI if it has no children */
2069 if (!vsi_node->num_children) {
2070 ice_free_sched_node(pi, vsi_node);
2071 vsi_ctx->sched.vsi_node[i] = NULL;
2072
2073 /* clean up aggregator related VSI info if any */
2074 ice_sched_rm_agg_vsi_info(pi, vsi_handle);
2075 }
2076 if (owner == ICE_SCHED_NODE_OWNER_LAN)
2077 vsi_ctx->sched.max_lanq[i] = 0;
2078 else
2079 vsi_ctx->sched.max_rdmaq[i] = 0;
2080 }
2081 status = 0;
2082
2083exit_sched_rm_vsi_cfg:
2084 mutex_unlock(&pi->sched_lock);
2085 return status;
2086}
2087
2088/**
2089 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
2090 * @pi: port information structure
2091 * @vsi_handle: software VSI handle
2092 *
2093 * This function clears the VSI and its LAN children nodes from scheduler tree
2094 * for all TCs.
2095 */
2096int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
2097{
2098 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
2099}
2100
2101/**
2102 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
2103 * @pi: port information structure
2104 * @vsi_handle: software VSI handle
2105 *
2106 * This function clears the VSI and its RDMA children nodes from scheduler tree
2107 * for all TCs.
2108 */
2109int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
2110{
2111 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
2112}
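
/* Illustrative usage sketch (not part of the driver): unlike
 * ice_sched_cfg_vsi(), these wrappers take pi->sched_lock themselves, so a
 * hypothetical VSI teardown path could call them back to back:
 *
 *	err = ice_rm_vsi_rdma_cfg(pi, vsi_handle);
 *	if (!err)
 *		err = ice_rm_vsi_lan_cfg(pi, vsi_handle);
 */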
2113
2114/**
2115 * ice_get_agg_info - get the aggregator info
2116 * @hw: pointer to the hardware structure
2117 * @agg_id: aggregator ID
2118 *
2119 * This function validates the aggregator ID. It returns the aggregator info
2120 * if the aggregator ID is present in the list; otherwise it returns NULL.
2121 */
2122static struct ice_sched_agg_info *
2123ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
2124{
2125 struct ice_sched_agg_info *agg_info;
2126
2127 list_for_each_entry(agg_info, &hw->agg_list, list_entry)
2128 if (agg_info->agg_id == agg_id)
2129 return agg_info;
2130
2131 return NULL;
2132}
2133
2134/**
2135 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2136 * @hw: pointer to the HW struct
2137 * @node: pointer to a child node
2138 * @num_nodes: num nodes count array
2139 *
2140 * This function walks through the aggregator subtree to find a free parent
2141 * node
2142 */
2143struct ice_sched_node *
2144ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
2145 u16 *num_nodes)
2146{
2147 u8 l = node->tx_sched_layer;
2148 u8 vsil, i;
2149
2150 vsil = ice_sched_get_vsi_layer(hw);
2151
2152	/* Is it the VSI parent layer? */
2153 if (l == vsil - 1)
2154 return (node->num_children < hw->max_children[l]) ? node : NULL;
2155
2156 /* We have intermediate nodes. Let's walk through the subtree. If the
2157 * intermediate node has space to add a new node then clear the count
2158 */
2159 if (node->num_children < hw->max_children[l])
2160 num_nodes[l] = 0;
2161	/* The recursive call below is intentional and will not go deeper than
2162	 * two or three levels.
2163	 */
2164
2165 for (i = 0; i < node->num_children; i++) {
2166 struct ice_sched_node *parent;
2167
2168 parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
2169 num_nodes);
2170 if (parent)
2171 return parent;
2172 }
2173
2174 return NULL;
2175}
2176
2177/**
2178 * ice_sched_update_parent - update the new parent in SW DB
2179 * @new_parent: pointer to a new parent node
2180 * @node: pointer to a child node
2181 *
2182 * This function removes the child from the old parent and adds it to a new
2183 * parent
2184 */
2185void
2186ice_sched_update_parent(struct ice_sched_node *new_parent,
2187 struct ice_sched_node *node)
2188{
2189 struct ice_sched_node *old_parent;
2190 u8 i, j;
2191
2192 old_parent = node->parent;
2193
2194 /* update the old parent children */
2195 for (i = 0; i < old_parent->num_children; i++)
2196 if (old_parent->children[i] == node) {
2197 for (j = i + 1; j < old_parent->num_children; j++)
2198 old_parent->children[j - 1] =
2199 old_parent->children[j];
2200 old_parent->num_children--;
2201 break;
2202 }
2203
2204 /* now move the node to a new parent */
2205 new_parent->children[new_parent->num_children++] = node;
2206 node->parent = new_parent;
2207 node->info.parent_teid = new_parent->info.node_teid;
2208}
2209
2210/**
2211 * ice_sched_move_nodes - move child nodes to a given parent
2212 * @pi: port information structure
2213 * @parent: pointer to parent node
2214 * @num_items: number of child nodes to be moved
2215 * @list: pointer to child node teids
2216 *
2217 * This function moves the child nodes to a given parent.
2218 */
2219int
2220ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
2221 u16 num_items, u32 *list)
2222{
2223 DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
2224 u16 buf_len = __struct_size(buf);
2225 struct ice_sched_node *node;
2226 u16 i, grps_movd = 0;
2227 struct ice_hw *hw;
2228 int status = 0;
2229
2230 hw = pi->hw;
2231
2232 if (!parent || !num_items)
2233 return -EINVAL;
2234
2235 /* Does parent have enough space */
2236 if (parent->num_children + num_items >
2237 hw->max_children[parent->tx_sched_layer])
2238 return -ENOSPC;
2239
2240 for (i = 0; i < num_items; i++) {
2241 node = ice_sched_find_node_by_teid(pi->root, list[i]);
2242 if (!node) {
2243 status = -EINVAL;
2244 break;
2245 }
2246
2247 buf->hdr.src_parent_teid = node->info.parent_teid;
2248 buf->hdr.dest_parent_teid = parent->info.node_teid;
2249 buf->teid[0] = node->info.node_teid;
2250 buf->hdr.num_elems = cpu_to_le16(1);
2251 status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
2252		if (status || grps_movd != 1) {
2253 status = -EIO;
2254 break;
2255 }
2256
2257 /* update the SW DB */
2258 ice_sched_update_parent(parent, node);
2259 }
2260
2261 return status;
2262}
2263
2264/**
2265 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
2266 * @pi: port information structure
2267 * @vsi_handle: software VSI handle
2268 * @agg_id: aggregator ID
2269 * @tc: TC number
2270 *
2271 * This function moves a VSI to an aggregator node or its subtree.
2272 * Intermediate nodes may be created if required.
2273 */
2274static int
2275ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
2276 u8 tc)
2277{
2278 struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
2279 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2280 u32 first_node_teid, vsi_teid;
2281 u16 num_nodes_added;
2282 u8 aggl, vsil, i;
2283 int status;
2284
2285 tc_node = ice_sched_get_tc_node(pi, tc);
2286 if (!tc_node)
2287 return -EIO;
2288
2289 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2290 if (!agg_node)
2291 return -ENOENT;
2292
2293 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2294 if (!vsi_node)
2295 return -ENOENT;
2296
2297 /* Is this VSI already part of given aggregator? */
2298 if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
2299 return 0;
2300
2301 aggl = ice_sched_get_agg_layer(pi->hw);
2302 vsil = ice_sched_get_vsi_layer(pi->hw);
2303
2304 /* set intermediate node count to 1 between aggregator and VSI layers */
2305 for (i = aggl + 1; i < vsil; i++)
2306 num_nodes[i] = 1;
2307
2308 /* Check if the aggregator subtree has any free node to add the VSI */
2309 for (i = 0; i < agg_node->num_children; i++) {
2310 parent = ice_sched_get_free_vsi_parent(pi->hw,
2311 agg_node->children[i],
2312 num_nodes);
2313 if (parent)
2314 goto move_nodes;
2315 }
2316
2317 /* add new nodes */
2318 parent = agg_node;
2319 for (i = aggl + 1; i < vsil; i++) {
2320 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2321 num_nodes[i],
2322 &first_node_teid,
2323 &num_nodes_added);
2324 if (status || num_nodes[i] != num_nodes_added)
2325 return -EIO;
2326
2327 /* The newly added node can be a new parent for the next
2328 * layer nodes
2329 */
2330 if (num_nodes_added)
2331 parent = ice_sched_find_node_by_teid(tc_node,
2332 first_node_teid);
2333 else
2334 parent = parent->children[0];
2335
2336 if (!parent)
2337 return -EIO;
2338 }
2339
2340move_nodes:
2341 vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
2342 return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
2343}
2344
2345/**
2346 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2347 * @pi: port information structure
2348 * @agg_info: aggregator info
2349 * @tc: traffic class number
2350 * @rm_vsi_info: remove VSI info from the aggregator list if true
2351 *
2352 * This function moves all the VSI(s) to the default aggregator and deletes
2353 * the aggregator VSI info based on the rm_vsi_info boolean parameter. The
2354 * caller holds the scheduler lock.
2355 */
2356static int
2357ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
2358 struct ice_sched_agg_info *agg_info, u8 tc,
2359 bool rm_vsi_info)
2360{
2361 struct ice_sched_agg_vsi_info *agg_vsi_info;
2362 struct ice_sched_agg_vsi_info *tmp;
2363 int status = 0;
2364
2365 list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
2366 list_entry) {
2367 u16 vsi_handle = agg_vsi_info->vsi_handle;
2368
2369 /* Move VSI to default aggregator */
2370 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
2371 continue;
2372
2373 status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
2374 ICE_DFLT_AGG_ID, tc);
2375 if (status)
2376 break;
2377
2378 clear_bit(tc, agg_vsi_info->tc_bitmap);
2379 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
2380 list_del(&agg_vsi_info->list_entry);
2381 devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
2382 }
2383 }
2384
2385 return status;
2386}
2387
2388/**
2389 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2390 * @pi: port information structure
2391 * @node: node pointer
2392 *
2393 * This function checks whether the aggregator is attached with any VSI or not.
2394 */
2395static bool
2396ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
2397{
2398 u8 vsil, i;
2399
2400 vsil = ice_sched_get_vsi_layer(pi->hw);
2401 if (node->tx_sched_layer < vsil - 1) {
2402 for (i = 0; i < node->num_children; i++)
2403 if (ice_sched_is_agg_inuse(pi, node->children[i]))
2404 return true;
2405 return false;
2406 } else {
2407 return node->num_children ? true : false;
2408 }
2409}
2410
2411/**
2412 * ice_sched_rm_agg_cfg - remove the aggregator node
2413 * @pi: port information structure
2414 * @agg_id: aggregator ID
2415 * @tc: TC number
2416 *
2417 * This function removes the aggregator node and intermediate nodes if any
2418 * from the given TC
2419 */
2420static int
2421ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2422{
2423 struct ice_sched_node *tc_node, *agg_node;
2424 struct ice_hw *hw = pi->hw;
2425
2426 tc_node = ice_sched_get_tc_node(pi, tc);
2427 if (!tc_node)
2428 return -EIO;
2429
2430 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2431 if (!agg_node)
2432 return -ENOENT;
2433
2434 /* Can't remove the aggregator node if it has children */
2435 if (ice_sched_is_agg_inuse(pi, agg_node))
2436 return -EBUSY;
2437
2438	/* need to remove the whole subtree if the aggregator node is the
2439	 * only child of its parent
2440	 */
2441 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
2442 struct ice_sched_node *parent = agg_node->parent;
2443
2444 if (!parent)
2445 return -EIO;
2446
2447 if (parent->num_children > 1)
2448 break;
2449
2450 agg_node = parent;
2451 }
2452
2453 ice_free_sched_node(pi, agg_node);
2454 return 0;
2455}
2456
2457/**
2458 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2459 * @pi: port information structure
2460 * @agg_info: aggregator info
2461 * @tc: TC number
2462 * @rm_vsi_info: remove VSI info from the aggregator list if true
2463 *
2464 * This function removes aggregator reference to VSI of given TC. It removes
2465 * the aggregator configuration completely for requested TC. The caller needs
2466 * to hold the scheduler lock.
2467 */
2468static int
2469ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
2470 u8 tc, bool rm_vsi_info)
2471{
2472 int status = 0;
2473
2474 /* If nothing to remove - return success */
2475 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2476 goto exit_rm_agg_cfg_tc;
2477
2478 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
2479 if (status)
2480 goto exit_rm_agg_cfg_tc;
2481
2482 /* Delete aggregator node(s) */
2483 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
2484 if (status)
2485 goto exit_rm_agg_cfg_tc;
2486
2487 clear_bit(tc, agg_info->tc_bitmap);
2488exit_rm_agg_cfg_tc:
2489 return status;
2490}
2491
2492/**
2493 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2494 * @pi: port information structure
2495 * @agg_id: aggregator ID
2496 * @tc_bitmap: 8 bits TC bitmap
2497 *
2498 * Save aggregator TC bitmap. This function needs to be called with scheduler
2499 * lock held.
2500 */
2501static int
2502ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
2503 unsigned long *tc_bitmap)
2504{
2505 struct ice_sched_agg_info *agg_info;
2506
2507 agg_info = ice_get_agg_info(pi->hw, agg_id);
2508 if (!agg_info)
2509 return -EINVAL;
2510 bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
2511 ICE_MAX_TRAFFIC_CLASS);
2512 return 0;
2513}
2514
2515/**
2516 * ice_sched_add_agg_cfg - create an aggregator node
2517 * @pi: port information structure
2518 * @agg_id: aggregator ID
2519 * @tc: TC number
2520 *
2521 * This function creates an aggregator node and intermediate nodes if required
2522 * for the given TC
2523 */
2524static int
2525ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2526{
2527 struct ice_sched_node *parent, *agg_node, *tc_node;
2528 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2529 struct ice_hw *hw = pi->hw;
2530 u32 first_node_teid;
2531 u16 num_nodes_added;
2532 int status = 0;
2533 u8 i, aggl;
2534
2535 tc_node = ice_sched_get_tc_node(pi, tc);
2536 if (!tc_node)
2537 return -EIO;
2538
2539 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2540	/* Does the aggregator node already exist? */
2541 if (agg_node)
2542 return status;
2543
2544 aggl = ice_sched_get_agg_layer(hw);
2545
2546 /* need one node in Agg layer */
2547 num_nodes[aggl] = 1;
2548
2549 /* Check whether the intermediate nodes have space to add the
2550 * new aggregator. If they are full, then SW needs to allocate a new
2551 * intermediate node on those layers
2552 */
2553 for (i = hw->sw_entry_point_layer; i < aggl; i++) {
2554 parent = ice_sched_get_first_node(pi, tc_node, i);
2555
2556 /* scan all the siblings */
2557 while (parent) {
2558 if (parent->num_children < hw->max_children[i])
2559 break;
2560 parent = parent->sibling;
2561 }
2562
2563 /* all the nodes are full, reserve one for this layer */
2564 if (!parent)
2565 num_nodes[i]++;
2566 }
2567
2568 /* add the aggregator node */
2569 parent = tc_node;
2570 for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
2571 if (!parent)
2572 return -EIO;
2573
2574 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2575 num_nodes[i],
2576 &first_node_teid,
2577 &num_nodes_added);
2578 if (status || num_nodes[i] != num_nodes_added)
2579 return -EIO;
2580
2581 /* The newly added node can be a new parent for the next
2582 * layer nodes
2583 */
2584 if (num_nodes_added) {
2585 parent = ice_sched_find_node_by_teid(tc_node,
2586 first_node_teid);
2587 /* register aggregator ID with the aggregator node */
2588 if (parent && i == aggl)
2589 parent->agg_id = agg_id;
2590 } else {
2591 parent = parent->children[0];
2592 }
2593 }
2594
2595 return 0;
2596}
2597
2598/**
2599 * ice_sched_cfg_agg - configure aggregator node
2600 * @pi: port information structure
2601 * @agg_id: aggregator ID
2602 * @agg_type: aggregator type queue, VSI, or aggregator group
2603 * @tc_bitmap: bits TC bitmap
2604 *
2605 * It registers a unique aggregator node into scheduler services. It
2606 * allows a user to register with a unique ID to track its resources.
2607 * The aggregator type determines if this is a queue group, VSI group
2608 * or aggregator group. It then creates the aggregator node(s) for requested
2609 * TC(s) or removes an existing aggregator node including its configuration
2610 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
2611 * resources and remove aggregator ID.
2612 * This function needs to be called with scheduler lock held.
2613 */
2614static int
2615ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
2616 enum ice_agg_type agg_type, unsigned long *tc_bitmap)
2617{
2618 struct ice_sched_agg_info *agg_info;
2619 struct ice_hw *hw = pi->hw;
2620 int status = 0;
2621 u8 tc;
2622
2623 agg_info = ice_get_agg_info(hw, agg_id);
2624 if (!agg_info) {
2625 /* Create new entry for new aggregator ID */
2626 agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
2627 GFP_KERNEL);
2628 if (!agg_info)
2629 return -ENOMEM;
2630
2631 agg_info->agg_id = agg_id;
2632 agg_info->agg_type = agg_type;
2633 agg_info->tc_bitmap[0] = 0;
2634
2635 /* Initialize the aggregator VSI list head */
2636 INIT_LIST_HEAD(&agg_info->agg_vsi_list);
2637
2638 /* Add new entry in aggregator list */
2639 list_add(&agg_info->list_entry, &hw->agg_list);
2640 }
2641 /* Create aggregator node(s) for requested TC(s) */
2642 ice_for_each_traffic_class(tc) {
2643 if (!ice_is_tc_ena(*tc_bitmap, tc)) {
2644 /* Delete aggregator cfg TC if it exists previously */
2645 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
2646 if (status)
2647 break;
2648 continue;
2649 }
2650
2651 /* Check if aggregator node for TC already exists */
2652 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2653 continue;
2654
2655 /* Create new aggregator node for TC */
2656 status = ice_sched_add_agg_cfg(pi, agg_id, tc);
2657 if (status)
2658 break;
2659
2660 /* Save aggregator node's TC information */
2661 set_bit(tc, agg_info->tc_bitmap);
2662 }
2663
2664 return status;
2665}
2666
2667/**
2668 * ice_cfg_agg - config aggregator node
2669 * @pi: port information structure
2670 * @agg_id: aggregator ID
2671 * @agg_type: aggregator type queue, VSI, or aggregator group
2672 * @tc_bitmap: bits TC bitmap
2673 *
2674 * This function configures aggregator node(s).
2675 */
2676int
2677ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
2678 u8 tc_bitmap)
2679{
2680 unsigned long bitmap = tc_bitmap;
2681 int status;
2682
2683 mutex_lock(&pi->sched_lock);
2684 status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
2685 if (!status)
2686 status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
2687 mutex_unlock(&pi->sched_lock);
2688 return status;
2689}
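
/* Illustrative usage sketch (not part of the driver): creating an aggregator
 * group for TC 0 and TC 1 only; agg_id is a hypothetical caller-chosen
 * identifier and 0x3 enables the two lowest TCs (assuming the
 * ICE_AGG_TYPE_AGG aggregator-group type).
 *
 *	err = ice_cfg_agg(pi, agg_id, ICE_AGG_TYPE_AGG, 0x3);
 */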
2690
2691/**
2692 * ice_get_agg_vsi_info - get the aggregator VSI info
2693 * @agg_info: aggregator info
2694 * @vsi_handle: software VSI handle
2695 *
2696 * The function returns aggregator VSI info based on VSI handle. This function
2697 * needs to be called with scheduler lock held.
2698 */
2699static struct ice_sched_agg_vsi_info *
2700ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
2701{
2702 struct ice_sched_agg_vsi_info *agg_vsi_info;
2703
2704 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
2705 if (agg_vsi_info->vsi_handle == vsi_handle)
2706 return agg_vsi_info;
2707
2708 return NULL;
2709}
2710
2711/**
2712 * ice_get_vsi_agg_info - get the aggregator info of VSI
2713 * @hw: pointer to the hardware structure
2714 * @vsi_handle: software VSI handle
2715 *
2716 * The function returns the aggregator info of the VSI represented by
2717 * vsi_handle when the VSI is assigned to an aggregator other than the default
2718 * one. This function needs to be called with the scheduler lock held.
2719 */
2720static struct ice_sched_agg_info *
2721ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
2722{
2723 struct ice_sched_agg_info *agg_info;
2724
2725 list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
2726 struct ice_sched_agg_vsi_info *agg_vsi_info;
2727
2728 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2729 if (agg_vsi_info)
2730 return agg_info;
2731 }
2732 return NULL;
2733}
2734
2735/**
2736 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2737 * @pi: port information structure
2738 * @agg_id: aggregator ID
2739 * @vsi_handle: software VSI handle
2740 * @tc_bitmap: TC bitmap of enabled TC(s)
2741 *
2742 * Save the VSI to aggregator TC bitmap. This function needs to be called with
2743 * the scheduler lock held.
2744 */
2745static int
2746ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2747 unsigned long *tc_bitmap)
2748{
2749 struct ice_sched_agg_vsi_info *agg_vsi_info;
2750 struct ice_sched_agg_info *agg_info;
2751
2752 agg_info = ice_get_agg_info(pi->hw, agg_id);
2753 if (!agg_info)
2754 return -EINVAL;
2755	/* check if the entry already exists */
2756 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2757 if (!agg_vsi_info)
2758 return -EINVAL;
2759 bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
2760 ICE_MAX_TRAFFIC_CLASS);
2761 return 0;
2762}
2763
2764/**
2765 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2766 * @pi: port information structure
2767 * @agg_id: aggregator ID
2768 * @vsi_handle: software VSI handle
2769 * @tc_bitmap: TC bitmap of enabled TC(s)
2770 *
2771 * This function moves a VSI to a new or the default aggregator node. If the
2772 * VSI is already associated with the aggregator node then no operation is
2773 * performed on the tree. Call this function with the scheduler lock held.
2774 */
2775static int
2776ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
2777 u16 vsi_handle, unsigned long *tc_bitmap)
2778{
2779 struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
2780 struct ice_sched_agg_info *agg_info, *old_agg_info;
2781 struct ice_hw *hw = pi->hw;
2782 int status = 0;
2783 u8 tc;
2784
2785 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2786 return -EINVAL;
2787 agg_info = ice_get_agg_info(hw, agg_id);
2788 if (!agg_info)
2789 return -EINVAL;
2790 /* If the VSI is already part of another aggregator then update
2791 * its VSI info list
2792 */
2793 old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
2794 if (old_agg_info && old_agg_info != agg_info) {
2795 struct ice_sched_agg_vsi_info *vtmp;
2796
2797 list_for_each_entry_safe(iter, vtmp,
2798 &old_agg_info->agg_vsi_list,
2799 list_entry)
2800 if (iter->vsi_handle == vsi_handle) {
2801 old_agg_vsi_info = iter;
2802 break;
2803 }
2804 }
2805
2806	/* check if the entry already exists */
2807 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2808 if (!agg_vsi_info) {
2809 /* Create new entry for VSI under aggregator list */
2810 agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
2811 sizeof(*agg_vsi_info), GFP_KERNEL);
2812 if (!agg_vsi_info)
2813			return -ENOMEM;
2814
2815 /* add VSI ID into the aggregator list */
2816 agg_vsi_info->vsi_handle = vsi_handle;
2817 list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
2818 }
2819 /* Move VSI node to new aggregator node for requested TC(s) */
2820 ice_for_each_traffic_class(tc) {
2821 if (!ice_is_tc_ena(*tc_bitmap, tc))
2822 continue;
2823
2824 /* Move VSI to new aggregator */
2825 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
2826 if (status)
2827 break;
2828
2829 set_bit(tc, agg_vsi_info->tc_bitmap);
2830 if (old_agg_vsi_info)
2831 clear_bit(tc, old_agg_vsi_info->tc_bitmap);
2832 }
2833 if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
2834 list_del(&old_agg_vsi_info->list_entry);
2835 devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
2836 }
2837 return status;
2838}
2839
2840/**
2841 * ice_sched_rm_unused_rl_prof - remove unused RL profile
2842 * @pi: port information structure
2843 *
2844 * This function removes unused rate limit profiles from the HW and
2845 * SW DB. The caller needs to hold scheduler lock.
2846 */
2847static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
2848{
2849 u16 ln;
2850
2851 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
2852 struct ice_aqc_rl_profile_info *rl_prof_elem;
2853 struct ice_aqc_rl_profile_info *rl_prof_tmp;
2854
2855 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
2856 &pi->rl_prof_list[ln], list_entry) {
2857 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
2858 ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
2859 }
2860 }
2861}
2862
2863/**
2864 * ice_sched_update_elem - update element
2865 * @hw: pointer to the HW struct
2866 * @node: pointer to node
2867 * @info: node info to update
2868 *
2869 * Update the HW DB and the local SW DB of the node. The scheduling
2870 * parameters of the node are updated from the info data buffer (info->data)
2871 * and an error is returned if the config sched element command fails. The
2872 * caller needs to hold the scheduler lock.
2873 */
2874static int
2875ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
2876 struct ice_aqc_txsched_elem_data *info)
2877{
2878 struct ice_aqc_txsched_elem_data buf;
2879 u16 elem_cfgd = 0;
2880 u16 num_elems = 1;
2881 int status;
2882
2883 buf = *info;
2884 /* Parent TEID is reserved field in this aq call */
2885	/* Parent TEID is a reserved field in this AQ call */
2886 /* Element type is reserved field in this aq call */
2887	/* Element type is a reserved field in this AQ call */
2888 /* Flags is reserved field in this aq call */
2889	/* Flags is a reserved field in this AQ call */
2890
2891 /* Update HW DB */
2892 /* Configure element node */
2893 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
2894 &elem_cfgd, NULL);
2895 if (status || elem_cfgd != num_elems) {
2896 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
2897 return -EIO;
2898 }
2899
2900 /* Config success case */
2901 /* Now update local SW DB */
2902 /* Only copy the data portion of info buffer */
2903 node->info.data = info->data;
2904 return status;
2905}
2906
2907/**
2908 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
2909 * @hw: pointer to the HW struct
2910 * @node: sched node to configure
2911 * @rl_type: rate limit type CIR, EIR, or shared
2912 * @bw_alloc: BW weight/allocation
2913 *
2914 * This function configures node element's BW allocation.
2915 */
2916static int
2917ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
2918 enum ice_rl_type rl_type, u16 bw_alloc)
2919{
2920 struct ice_aqc_txsched_elem_data buf;
2921 struct ice_aqc_txsched_elem *data;
2922
2923 buf = node->info;
2924 data = &buf.data;
2925 if (rl_type == ICE_MIN_BW) {
2926 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2927 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
2928 } else if (rl_type == ICE_MAX_BW) {
2929 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2930 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
2931 } else {
2932 return -EINVAL;
2933 }
2934
2935 /* Configure element */
2936 return ice_sched_update_elem(hw, node, &buf);
2937}
2938
2939/**
2940 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
2941 * @pi: port information structure
2942 * @agg_id: aggregator ID
2943 * @vsi_handle: software VSI handle
2944 * @tc_bitmap: TC bitmap of enabled TC(s)
2945 *
2946 * Move or associate VSI to a new or default aggregator node.
2947 */
2948int
2949ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2950 u8 tc_bitmap)
2951{
2952 unsigned long bitmap = tc_bitmap;
2953 int status;
2954
2955 mutex_lock(&pi->sched_lock);
2956 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
2957 (unsigned long *)&bitmap);
2958 if (!status)
2959 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
2960 (unsigned long *)&bitmap);
2961 mutex_unlock(&pi->sched_lock);
2962 return status;
2963}
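
/* Illustrative usage sketch (not part of the driver): once an aggregator has
 * been created with ice_cfg_agg(), a VSI can be attached to it on the same
 * TCs; agg_id and vsi_handle are hypothetical values.
 *
 *	err = ice_move_vsi_to_agg(pi, agg_id, vsi_handle, 0x3);
 */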
2964
2965/**
2966 * ice_set_clear_cir_bw - set or clear CIR BW
2967 * @bw_t_info: bandwidth type information structure
2968 * @bw: bandwidth in Kbps - Kilo bits per sec
2969 *
2970 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
2971 */
2972static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2973{
2974 if (bw == ICE_SCHED_DFLT_BW) {
2975 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
2976 bw_t_info->cir_bw.bw = 0;
2977 } else {
2978 /* Save type of BW information */
2979 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
2980 bw_t_info->cir_bw.bw = bw;
2981 }
2982}
2983
2984/**
2985 * ice_set_clear_eir_bw - set or clear EIR BW
2986 * @bw_t_info: bandwidth type information structure
2987 * @bw: bandwidth in Kbps - Kilo bits per sec
2988 *
2989 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
2990 */
2991static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2992{
2993 if (bw == ICE_SCHED_DFLT_BW) {
2994 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2995 bw_t_info->eir_bw.bw = 0;
2996 } else {
2997 /* EIR BW and Shared BW profiles are mutually exclusive and
2998 * hence only one of them may be set for any given element.
2999 * First clear earlier saved shared BW information.
3000 */
3001 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3002 bw_t_info->shared_bw = 0;
3003 /* save EIR BW information */
3004 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3005 bw_t_info->eir_bw.bw = bw;
3006 }
3007}
3008
3009/**
3010 * ice_set_clear_shared_bw - set or clear shared BW
3011 * @bw_t_info: bandwidth type information structure
3012 * @bw: bandwidth in Kbps - Kilo bits per sec
3013 *
3014 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
3015 */
3016static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3017{
3018 if (bw == ICE_SCHED_DFLT_BW) {
3019 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3020 bw_t_info->shared_bw = 0;
3021 } else {
3022 /* EIR BW and Shared BW profiles are mutually exclusive and
3023 * hence only one of them may be set for any given element.
3024 * First clear earlier saved EIR BW information.
3025 */
3026 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3027 bw_t_info->eir_bw.bw = 0;
3028 /* save shared BW information */
3029 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3030 bw_t_info->shared_bw = bw;
3031 }
3032}
3033
3034/**
3035 * ice_sched_save_vsi_bw - save VSI node's BW information
3036 * @pi: port information structure
3037 * @vsi_handle: sw VSI handle
3038 * @tc: traffic class
3039 * @rl_type: rate limit type min, max, or shared
3040 * @bw: bandwidth in Kbps - Kilo bits per sec
3041 *
3042 * Save BW information of VSI type node for post replay use.
3043 */
3044static int
3045ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3046 enum ice_rl_type rl_type, u32 bw)
3047{
3048 struct ice_vsi_ctx *vsi_ctx;
3049
3050 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3051 return -EINVAL;
3052 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3053 if (!vsi_ctx)
3054 return -EINVAL;
3055 switch (rl_type) {
3056 case ICE_MIN_BW:
3057 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3058 break;
3059 case ICE_MAX_BW:
3060 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3061 break;
3062 case ICE_SHARED_BW:
3063 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3064 break;
3065 default:
3066 return -EINVAL;
3067 }
3068 return 0;
3069}
3070
3071/**
3072 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
3073 * @hw: pointer to the HW struct
3074 * @bw: bandwidth in Kbps
3075 *
3076 * This function calculates the wakeup parameter of RL profile.
3077 */
3078static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
3079{
3080 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
3081 s32 wakeup_f_int;
3082 u16 wakeup = 0;
3083
3084 /* Get the wakeup integer value */
3085 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
3086 wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
3087 if (wakeup_int > 63) {
3088 wakeup = (u16)((1 << 15) | wakeup_int);
3089 } else {
3090 /* Calculate fraction value up to 4 decimals
3091 * Convert Integer value to a constant multiplier
3092 */
3093 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
3094 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
3095 hw->psm_clk_freq, bytes_per_sec);
3096
3097 /* Get Fraction value */
3098 wakeup_f = wakeup_a - wakeup_b;
3099
3100 /* Round up the Fractional value via Ceil(Fractional value) */
3101 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
3102 wakeup_f += 1;
3103
3104 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
3105 ICE_RL_PROF_MULTIPLIER);
3106 wakeup |= (u16)(wakeup_int << 9);
3107 wakeup |= (u16)(0x1ff & wakeup_f_int);
3108 }
3109
3110 return wakeup;
3111}
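
/* Reading of the encoding above (derived from the code, not from a datasheet):
 * when the integer part of psm_clk_freq / bytes_per_sec exceeds 63, bit 15 is
 * set and the low bits carry that integer directly; otherwise bits 14:9 hold
 * the integer part and bits 8:0 hold the fractional part rescaled from
 * ICE_RL_PROF_MULTIPLIER to ICE_RL_PROF_FRACTION units.
 */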
3112
3113/**
3114 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
3115 * @hw: pointer to the HW struct
3116 * @bw: bandwidth in Kbps
3117 * @profile: profile parameters to return
3118 *
3119 * This function converts the BW to profile structure format.
3120 */
3121static int
3122ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
3123 struct ice_aqc_rl_profile_elem *profile)
3124{
3125 s64 bytes_per_sec, ts_rate, mv_tmp;
3126 int status = -EINVAL;
3127 bool found = false;
3128 s32 encode = 0;
3129 s64 mv = 0;
3130 s32 i;
3131
3132	/* BW settings range from 0.5 Mb/sec to 100 Gb/sec */
3133 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
3134 return status;
3135
3136 /* Bytes per second from Kbps */
3137 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
3138
3139	/* the encode field is 6 bits wide, but only 5 bits are actually useful */
3140 for (i = 0; i < 64; i++) {
3141 u64 pow_result = BIT_ULL(i);
3142
3143 ts_rate = div64_long((s64)hw->psm_clk_freq,
3144 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
3145 if (ts_rate <= 0)
3146 continue;
3147
3148 /* Multiplier value */
3149 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
3150 ts_rate);
3151
3152 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
3153 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
3154
3155 /* First multiplier value greater than the given
3156 * accuracy bytes
3157 */
3158 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
3159 encode = i;
3160 found = true;
3161 break;
3162 }
3163 }
3164 if (found) {
3165 u16 wm;
3166
3167 wm = ice_sched_calc_wakeup(hw, bw);
3168 profile->rl_multiply = cpu_to_le16(mv);
3169 profile->wake_up_calc = cpu_to_le16(wm);
3170 profile->rl_encode = cpu_to_le16(encode);
3171 status = 0;
3172 } else {
3173 status = -ENOENT;
3174 }
3175
3176 return status;
3177}
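
/* Reading of the conversion above (derived from the code): rl_encode stores
 * the power-of-two divider exponent selected by the search, rl_multiply
 * stores the multiplier computed for that divider, and wake_up_calc carries
 * the value from ice_sched_calc_wakeup(); together these fields describe the
 * requested rate to firmware.
 */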
3178
3179/**
3180 * ice_sched_add_rl_profile - add RL profile
3181 * @pi: port information structure
3182 * @rl_type: type of rate limit BW - min, max, or shared
3183 * @bw: bandwidth in Kbps - Kilo bits per sec
3184 * @layer_num: specifies in which layer to create profile
3185 *
3186 * This function first checks the existing list for corresponding BW
3187 * This function first checks the existing list for a corresponding BW
3188 * parameter. If it exists, it returns the associated profile; otherwise
3189 * it creates a new rate limit profile for the requested BW, and adds it to
3190 * the HW DB and local list. It returns the new profile or NULL on error.
3191 */
3192static struct ice_aqc_rl_profile_info *
3193ice_sched_add_rl_profile(struct ice_port_info *pi,
3194 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
3195{
3196 struct ice_aqc_rl_profile_info *rl_prof_elem;
3197 u16 profiles_added = 0, num_profiles = 1;
3198 struct ice_aqc_rl_profile_elem *buf;
3199 struct ice_hw *hw;
3200 u8 profile_type;
3201 int status;
3202
3203 if (!pi || layer_num >= pi->hw->num_tx_sched_layers)
3204 return NULL;
3205 switch (rl_type) {
3206 case ICE_MIN_BW:
3207 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
3208 break;
3209 case ICE_MAX_BW:
3210 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
3211 break;
3212 case ICE_SHARED_BW:
3213 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
3214 break;
3215 default:
3216 return NULL;
3217 }
3218
3219 hw = pi->hw;
3220 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
3221 list_entry)
3222 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
3223 profile_type && rl_prof_elem->bw == bw)
3224 /* Return existing profile ID info */
3225 return rl_prof_elem;
3226
3227 /* Create new profile ID */
3228 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
3229 GFP_KERNEL);
3230
3231 if (!rl_prof_elem)
3232 return NULL;
3233
3234 status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
3235 if (status)
3236 goto exit_add_rl_prof;
3237
3238 rl_prof_elem->bw = bw;
3239 /* layer_num is zero relative, and fw expects level from 1 to 9 */
3240 rl_prof_elem->profile.level = layer_num + 1;
3241 rl_prof_elem->profile.flags = profile_type;
3242 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
3243
3244 /* Create new entry in HW DB */
3245 buf = &rl_prof_elem->profile;
3246 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
3247 &profiles_added, NULL);
3248 if (status || profiles_added != num_profiles)
3249 goto exit_add_rl_prof;
3250
3251 /* Good entry - add in the list */
3252 rl_prof_elem->prof_id_ref = 0;
3253 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
3254 return rl_prof_elem;
3255
3256exit_add_rl_prof:
3257 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
3258 return NULL;
3259}
3260
3261/**
3262 * ice_sched_cfg_node_bw_lmt - configure node sched params
3263 * @hw: pointer to the HW struct
3264 * @node: sched node to configure
3265 * @rl_type: rate limit type CIR, EIR, or shared
3266 * @rl_prof_id: rate limit profile ID
3267 *
3268 * This function configures node element's BW limit.
3269 */
3270static int
3271ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
3272 enum ice_rl_type rl_type, u16 rl_prof_id)
3273{
3274 struct ice_aqc_txsched_elem_data buf;
3275 struct ice_aqc_txsched_elem *data;
3276
3277 buf = node->info;
3278 data = &buf.data;
3279 switch (rl_type) {
3280 case ICE_MIN_BW:
3281 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
3282 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
3283 break;
3284 case ICE_MAX_BW:
3285 /* EIR BW and Shared BW profiles are mutually exclusive and
3286 * hence only one of them may be set for any given element
3287 */
3288 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
3289 return -EIO;
3290 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3291 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
3292 break;
3293 case ICE_SHARED_BW:
3294 /* Check for removing shared BW */
3295 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
3296 /* remove shared profile */
3297 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
3298 data->srl_id = 0; /* clear SRL field */
3299
3300 /* enable back EIR to default profile */
3301 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3302 data->eir_bw.bw_profile_idx =
3303 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3304 break;
3305 }
3306 /* EIR BW and Shared BW profiles are mutually exclusive and
3307 * hence only one of them may be set for any given element
3308 */
3309 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
3310 (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
3311 ICE_SCHED_DFLT_RL_PROF_ID))
3312 return -EIO;
3313 /* EIR BW is set to default, disable it */
3314 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
3315 /* Okay to enable shared BW now */
3316 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
3317 data->srl_id = cpu_to_le16(rl_prof_id);
3318 break;
3319 default:
3320 /* Unknown rate limit type */
3321 return -EINVAL;
3322 }
3323
3324 /* Configure element */
3325 return ice_sched_update_elem(hw, node, &buf);
3326}
3327
3328/**
3329 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
3330 * @node: sched node
3331 * @rl_type: rate limit type
3332 *
3333 * If existing profile matches, it returns the corresponding rate
3334 * limit profile ID, otherwise it returns an invalid ID as error.
3335 */
3336static u16
3337ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
3338 enum ice_rl_type rl_type)
3339{
3340 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
3341 struct ice_aqc_txsched_elem *data;
3342
3343 data = &node->info.data;
3344 switch (rl_type) {
3345 case ICE_MIN_BW:
3346 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
3347 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
3348 break;
3349 case ICE_MAX_BW:
3350 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
3351 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
3352 break;
3353 case ICE_SHARED_BW:
3354 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
3355 rl_prof_id = le16_to_cpu(data->srl_id);
3356 break;
3357 default:
3358 break;
3359 }
3360
3361 return rl_prof_id;
3362}
3363
3364/**
3365 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
3366 * @pi: port information structure
3367 * @rl_type: type of rate limit BW - min, max, or shared
3368 * @layer_index: layer index
3369 *
3370 * This function returns requested profile creation layer.
3371 */
3372static u8
3373ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
3374 u8 layer_index)
3375{
3376 struct ice_hw *hw = pi->hw;
3377
3378 if (layer_index >= hw->num_tx_sched_layers)
3379 return ICE_SCHED_INVAL_LAYER_NUM;
3380 switch (rl_type) {
3381 case ICE_MIN_BW:
3382 if (hw->layer_info[layer_index].max_cir_rl_profiles)
3383 return layer_index;
3384 break;
3385 case ICE_MAX_BW:
3386 if (hw->layer_info[layer_index].max_eir_rl_profiles)
3387 return layer_index;
3388 break;
3389 case ICE_SHARED_BW:
3390 /* if current layer doesn't support SRL profile creation
3391 * then try a layer up or down.
3392 */
3393 if (hw->layer_info[layer_index].max_srl_profiles)
3394 return layer_index;
3395 else if (layer_index < hw->num_tx_sched_layers - 1 &&
3396 hw->layer_info[layer_index + 1].max_srl_profiles)
3397 return layer_index + 1;
3398 else if (layer_index > 0 &&
3399 hw->layer_info[layer_index - 1].max_srl_profiles)
3400 return layer_index - 1;
3401 break;
3402 default:
3403 break;
3404 }
3405 return ICE_SCHED_INVAL_LAYER_NUM;
3406}
3407
3408/**
3409 * ice_sched_get_srl_node - get shared rate limit node
3410 * @node: tree node
3411 * @srl_layer: shared rate limit layer
3412 *
3413 * This function returns SRL node to be used for shared rate limit purpose.
3414 * The caller needs to hold scheduler lock.
3415 */
3416static struct ice_sched_node *
3417ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
3418{
3419 if (srl_layer > node->tx_sched_layer)
3420 return node->children[0];
3421 else if (srl_layer < node->tx_sched_layer)
3422		/* A node can't be created without a parent. Every node except
3423		 * the root always has a valid parent.
3424		 */
3425 return node->parent;
3426 else
3427 return node;
3428}
3429
3430/**
3431 * ice_sched_rm_rl_profile - remove RL profile ID
3432 * @pi: port information structure
3433 * @layer_num: layer number where profiles are saved
3434 * @profile_type: profile type like EIR, CIR, or SRL
3435 * @profile_id: profile ID to remove
3436 *
3437 * This function removes rate limit profile from layer 'layer_num' of type
3438 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
3439 * scheduler lock.
3440 */
3441static int
3442ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
3443 u16 profile_id)
3444{
3445 struct ice_aqc_rl_profile_info *rl_prof_elem;
3446 int status = 0;
3447
3448 if (layer_num >= pi->hw->num_tx_sched_layers)
3449 return -EINVAL;
3450 /* Check the existing list for RL profile */
3451 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
3452 list_entry)
3453 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
3454 profile_type &&
3455 le16_to_cpu(rl_prof_elem->profile.profile_id) ==
3456 profile_id) {
3457 if (rl_prof_elem->prof_id_ref)
3458 rl_prof_elem->prof_id_ref--;
3459
3460 /* Remove old profile ID from database */
3461 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
3462 if (status && status != -EBUSY)
3463 ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
3464 break;
3465 }
3466 if (status == -EBUSY)
3467 status = 0;
3468 return status;
3469}
3470
3471/**
3472 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
3473 * @pi: port information structure
3474 * @node: pointer to node structure
3475 * @rl_type: rate limit type min, max, or shared
3476 * @layer_num: layer number where RL profiles are saved
3477 *
3478 * This function configures node element's BW rate limit profile ID of
3479 * type CIR, EIR, or SRL to default. This function needs to be called
3480 * with the scheduler lock held.
3481 */
3482static int
3483ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
3484 struct ice_sched_node *node,
3485 enum ice_rl_type rl_type, u8 layer_num)
3486{
3487 struct ice_hw *hw;
3488 u8 profile_type;
3489 u16 rl_prof_id;
3490 u16 old_id;
3491 int status;
3492
3493 hw = pi->hw;
3494 switch (rl_type) {
3495 case ICE_MIN_BW:
3496 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
3497 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
3498 break;
3499 case ICE_MAX_BW:
3500 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
3501 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
3502 break;
3503 case ICE_SHARED_BW:
3504 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
3505 /* No SRL is configured for default case */
3506 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
3507 break;
3508 default:
3509 return -EINVAL;
3510 }
3511 /* Save existing RL prof ID for later clean up */
3512 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
3513 /* Configure BW scheduling parameters */
3514 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
3515 if (status)
3516 return status;
3517
3518 /* Remove stale RL profile ID */
3519 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
3520 old_id == ICE_SCHED_INVAL_PROF_ID)
3521 return 0;
3522
3523 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
3524}
3525
3526/**
3527 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
3528 * @pi: port information structure
3529 * @node: pointer to node structure
3530 * @layer_num: layer number where rate limit profiles are saved
3531 * @rl_type: rate limit type min, max, or shared
3532 * @bw: bandwidth value
3533 *
3534 * This function prepares the node element's bandwidth to use SRL or EIR exclusively.
3535 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
3536 * them may be set for any given element. This function needs to be called
3537 * with the scheduler lock held.
3538 */
3539static int
3540ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
3541 struct ice_sched_node *node,
3542 u8 layer_num, enum ice_rl_type rl_type, u32 bw)
3543{
3544 if (rl_type == ICE_SHARED_BW) {
3545 /* SRL node passed in this case, it may be different node */
3546 if (bw == ICE_SCHED_DFLT_BW)
3547 /* SRL being removed, ice_sched_cfg_node_bw_lmt()
3548 * enables EIR to default. EIR is not set in this
3549 * case, so no additional action is required.
3550 */
3551 return 0;
3552
3553 /* SRL being configured, set EIR to default here.
3554 * ice_sched_cfg_node_bw_lmt() disables EIR when it
3555 * configures SRL
3556 */
3557 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
3558 layer_num);
3559 } else if (rl_type == ICE_MAX_BW &&
3560 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
3561 /* Remove Shared profile. Set default shared BW call
3562 * removes shared profile for a node.
3563 */
3564 return ice_sched_set_node_bw_dflt(pi, node,
3565 ICE_SHARED_BW,
3566 layer_num);
3567 }
3568 return 0;
3569}
3570
3571/**
3572 * ice_sched_set_node_bw - set node's bandwidth
3573 * @pi: port information structure
3574 * @node: tree node
3575 * @rl_type: rate limit type min, max, or shared
3576 * @bw: bandwidth in Kbps - Kilo bits per sec
3577 * @layer_num: layer number
3578 *
3579 * This function adds a new profile corresponding to the requested BW, configures
3580 * the node's RL profile ID of type CIR, EIR, or SRL, and removes the old profile
3581 * ID from the local database. The caller needs to hold the scheduler lock.
3582 */
3583int
3584ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
3585 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
3586{
3587 struct ice_aqc_rl_profile_info *rl_prof_info;
3588 struct ice_hw *hw = pi->hw;
3589 u16 old_id, rl_prof_id;
3590 int status = -EINVAL;
3591
3592 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
3593 if (!rl_prof_info)
3594 return status;
3595
3596 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
3597
3598 /* Save existing RL prof ID for later clean up */
3599 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
3600 /* Configure BW scheduling parameters */
3601 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
3602 if (status)
3603 return status;
3604
3605	/* New changes have been applied */
3606 /* Increment the profile ID reference count */
3607 rl_prof_info->prof_id_ref++;
3608
3609 /* Check for old ID removal */
3610 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
3611 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
3612 return 0;
3613
3614 return ice_sched_rm_rl_profile(pi, layer_num,
3615 rl_prof_info->profile.flags &
3616 ICE_AQC_RL_PROFILE_TYPE_M, old_id);
3617}
3618
3619/**
3620 * ice_sched_set_node_priority - set node's priority
3621 * @pi: port information structure
3622 * @node: tree node
3623 * @priority: number 0-7 representing priority among siblings
3624 *
3625 * This function sets the priority of a node among its siblings.
3626 */
3627int
3628ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
3629 u16 priority)
3630{
3631 struct ice_aqc_txsched_elem_data buf;
3632 struct ice_aqc_txsched_elem *data;
3633
3634 buf = node->info;
3635 data = &buf.data;
3636
3637 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
3638 data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority);
3639
3640 return ice_sched_update_elem(pi->hw, node, &buf);
3641}
3642
3643/**
3644 * ice_sched_set_node_weight - set node's weight
3645 * @pi: port information structure
3646 * @node: tree node
3647 * @weight: number 1-200 representing weight for WFQ
3648 *
3649 * This function sets the weight of the node for the WFQ algorithm.
3650 */
3651int
3652ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
3653{
3654 struct ice_aqc_txsched_elem_data buf;
3655 struct ice_aqc_txsched_elem *data;
3656
3657 buf = node->info;
3658 data = &buf.data;
3659
3660 data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
3661 ICE_AQC_ELEM_VALID_GENERIC;
3662 data->cir_bw.bw_alloc = cpu_to_le16(weight);
3663 data->eir_bw.bw_alloc = cpu_to_le16(weight);
3664
3665 data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0);
3666
3667 return ice_sched_update_elem(pi->hw, node, &buf);
3668}
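
/* Example, with hypothetical weights: giving two sibling nodes weights of
 * 50 and 100 via this function programs those values into both the CIR and
 * EIR bw_alloc fields, so under WFQ the siblings would share excess
 * bandwidth in roughly a 1:2 ratio.
 */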
3669
3670/**
3671 * ice_sched_set_node_bw_lmt - set node's BW limit
3672 * @pi: port information structure
3673 * @node: tree node
3674 * @rl_type: rate limit type min, max, or shared
3675 * @bw: bandwidth in Kbps - Kilo bits per sec
3676 *
3677 * It updates the node's BW limit parameters, such as the BW RL profile ID of
3678 * type CIR, EIR, or SRL. The caller needs to hold the scheduler lock.
3679 */
3680int
3681ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
3682 enum ice_rl_type rl_type, u32 bw)
3683{
3684 struct ice_sched_node *cfg_node = node;
3685 int status;
3686
3687 struct ice_hw *hw;
3688 u8 layer_num;
3689
3690 if (!pi)
3691 return -EINVAL;
3692 hw = pi->hw;
3693 /* Remove unused RL profile IDs from HW and SW DB */
3694 ice_sched_rm_unused_rl_prof(pi);
3695 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
3696 node->tx_sched_layer);
3697 if (layer_num >= hw->num_tx_sched_layers)
3698 return -EINVAL;
3699
3700 if (rl_type == ICE_SHARED_BW) {
3701 /* SRL node may be different */
3702 cfg_node = ice_sched_get_srl_node(node, layer_num);
3703 if (!cfg_node)
3704 return -EIO;
3705 }
3706 /* EIR BW and Shared BW profiles are mutually exclusive and
3707 * hence only one of them may be set for any given element
3708 */
3709 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
3710 bw);
3711 if (status)
3712 return status;
3713 if (bw == ICE_SCHED_DFLT_BW)
3714 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
3715 layer_num);
3716 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
3717}
3718
3719/**
3720 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
3721 * @pi: port information structure
3722 * @node: pointer to node structure
3723 * @rl_type: rate limit type min, max, or shared
3724 *
3725 * This function configures node element's BW rate limit profile ID of
3726 * type CIR, EIR, or SRL to default. This function needs to be called
3727 * with the scheduler lock held.
3728 */
3729static int
3730ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
3731 struct ice_sched_node *node,
3732 enum ice_rl_type rl_type)
3733{
3734 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
3735 ICE_SCHED_DFLT_BW);
3736}
3737
3738/**
3739 * ice_sched_validate_srl_node - Check node for SRL applicability
3740 * @node: sched node to configure
3741 * @sel_layer: selected SRL layer
3742 *
3743 * This function checks if the SRL can be applied to a selected layer node on
3744 * behalf of the requested node (first argument). This function needs to be
3745 * called with scheduler lock held.
3746 */
3747static int
3748ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
3749{
3750 /* SRL profiles are not available on all layers. Check if the
3751 * SRL profile can be applied to a node above or below the
3752 * requested node. SRL configuration is possible only if the
3753 * selected layer's node has single child.
3754 */
3755 if (sel_layer == node->tx_sched_layer ||
3756 ((sel_layer == node->tx_sched_layer + 1) &&
3757 node->num_children == 1) ||
3758 ((sel_layer == node->tx_sched_layer - 1) &&
3759 (node->parent && node->parent->num_children == 1)))
3760 return 0;
3761
3762 return -EIO;
3763}
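
/* Examples, using a hypothetical layer 5 node: if the selected SRL layer is
 * also 5, the check passes unconditionally; if it is 6 (one layer below),
 * it passes only when the node has a single child; if it is 4 (one layer
 * above), it passes only when the node's parent has a single child. Any
 * other combination returns -EIO.
 */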
3764
3765/**
3766 * ice_sched_save_q_bw - save queue node's BW information
3767 * @q_ctx: queue context structure
3768 * @rl_type: rate limit type min, max, or shared
3769 * @bw: bandwidth in Kbps - Kilo bits per sec
3770 *
3771 * Save BW information of queue type node for post replay use.
3772 */
3773static int
3774ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
3775{
3776 switch (rl_type) {
3777 case ICE_MIN_BW:
3778 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
3779 break;
3780 case ICE_MAX_BW:
3781 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
3782 break;
3783 case ICE_SHARED_BW:
3784 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
3785 break;
3786 default:
3787 return -EINVAL;
3788 }
3789 return 0;
3790}
3791
3792/**
3793 * ice_sched_set_q_bw_lmt - sets queue BW limit
3794 * @pi: port information structure
3795 * @vsi_handle: sw VSI handle
3796 * @tc: traffic class
3797 * @q_handle: software queue handle
3798 * @rl_type: min, max, or shared
3799 * @bw: bandwidth in Kbps
3800 *
3801 * This function sets BW limit of queue scheduling node.
3802 */
3803static int
3804ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3805 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
3806{
3807 struct ice_sched_node *node;
3808 struct ice_q_ctx *q_ctx;
3809 int status = -EINVAL;
3810
3811 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3812 return -EINVAL;
3813 mutex_lock(&pi->sched_lock);
3814 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
3815 if (!q_ctx)
3816 goto exit_q_bw_lmt;
3817 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
3818 if (!node) {
3819 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
3820 goto exit_q_bw_lmt;
3821 }
3822
3823 /* Return error if it is not a leaf node */
3824 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
3825 goto exit_q_bw_lmt;
3826
3827 /* SRL bandwidth layer selection */
3828 if (rl_type == ICE_SHARED_BW) {
3829 u8 sel_layer; /* selected layer */
3830
3831 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
3832 node->tx_sched_layer);
3833 if (sel_layer >= pi->hw->num_tx_sched_layers) {
3834 status = -EINVAL;
3835 goto exit_q_bw_lmt;
3836 }
3837 status = ice_sched_validate_srl_node(node, sel_layer);
3838 if (status)
3839 goto exit_q_bw_lmt;
3840 }
3841
3842 if (bw == ICE_SCHED_DFLT_BW)
3843 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
3844 else
3845 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
3846
3847 if (!status)
3848 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
3849
3850exit_q_bw_lmt:
3851 mutex_unlock(&pi->sched_lock);
3852 return status;
3853}
3854
3855/**
3856 * ice_cfg_q_bw_lmt - configure queue BW limit
3857 * @pi: port information structure
3858 * @vsi_handle: sw VSI handle
3859 * @tc: traffic class
3860 * @q_handle: software queue handle
3861 * @rl_type: min, max, or shared
3862 * @bw: bandwidth in Kbps
3863 *
3864 * This function configures BW limit of queue scheduling node.
3865 */
3866int
3867ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3868 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
3869{
3870 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
3871 bw);
3872}
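
/* Illustrative call (hypothetical handles and rate): capping queue handle 0
 * of TC 0 on VSI handle 3 to a 100000 Kbps max rate would be
 * ice_cfg_q_bw_lmt(pi, 3, 0, 0, ICE_MAX_BW, 100000). Internally this
 * resolves the queue node by TEID, checks that it is a leaf, applies the
 * limit, and saves the BW so it can be replayed after a reset.
 */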
3873
3874/**
3875 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
3876 * @pi: port information structure
3877 * @vsi_handle: sw VSI handle
3878 * @tc: traffic class
3879 * @q_handle: software queue handle
3880 * @rl_type: min, max, or shared
3881 *
3882 * This function configures the default BW limit of the queue scheduling node.
3883 */
3884int
3885ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3886 u16 q_handle, enum ice_rl_type rl_type)
3887{
3888 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
3889 ICE_SCHED_DFLT_BW);
3890}
3891
3892/**
3893 * ice_sched_get_node_by_id_type - get node from ID type
3894 * @pi: port information structure
3895 * @id: identifier
3896 * @agg_type: type of aggregator
3897 * @tc: traffic class
3898 *
3899 * This function returns the node identified by the ID and aggregator type,
3900 * based on the traffic class (TC). This function needs to be called with
3901 * the scheduler lock held.
3902 */
3903static struct ice_sched_node *
3904ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
3905 enum ice_agg_type agg_type, u8 tc)
3906{
3907 struct ice_sched_node *node = NULL;
3908
3909 switch (agg_type) {
3910 case ICE_AGG_TYPE_VSI: {
3911 struct ice_vsi_ctx *vsi_ctx;
3912 u16 vsi_handle = (u16)id;
3913
3914 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3915 break;
3916 /* Get sched_vsi_info */
3917 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3918 if (!vsi_ctx)
3919 break;
3920 node = vsi_ctx->sched.vsi_node[tc];
3921 break;
3922 }
3923
3924 case ICE_AGG_TYPE_AGG: {
3925 struct ice_sched_node *tc_node;
3926
3927 tc_node = ice_sched_get_tc_node(pi, tc);
3928 if (tc_node)
3929 node = ice_sched_get_agg_node(pi, tc_node, id);
3930 break;
3931 }
3932
3933 default:
3934 break;
3935 }
3936
3937 return node;
3938}
3939
3940/**
3941 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
3942 * @pi: port information structure
3943 * @id: ID (software VSI handle or AGG ID)
3944 * @agg_type: aggregator type (VSI or AGG type node)
3945 * @tc: traffic class
3946 * @rl_type: min or max
3947 * @bw: bandwidth in Kbps
3948 *
3949 * This function sets the BW limit of a VSI or aggregator scheduling node
3950 * for the given TC, using the bandwidth passed in the bw argument.
3951 */
3952static int
3953ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
3954 enum ice_agg_type agg_type, u8 tc,
3955 enum ice_rl_type rl_type, u32 bw)
3956{
3957 struct ice_sched_node *node;
3958 int status = -EINVAL;
3959
3960 if (!pi)
3961 return status;
3962
3963 if (rl_type == ICE_UNKNOWN_BW)
3964 return status;
3965
3966 mutex_lock(&pi->sched_lock);
3967 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
3968 if (!node) {
3969 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
3970 goto exit_set_node_bw_lmt_per_tc;
3971 }
3972 if (bw == ICE_SCHED_DFLT_BW)
3973 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
3974 else
3975 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
3976
3977exit_set_node_bw_lmt_per_tc:
3978 mutex_unlock(&pi->sched_lock);
3979 return status;
3980}
3981
3982/**
3983 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
3984 * @pi: port information structure
3985 * @vsi_handle: software VSI handle
3986 * @tc: traffic class
3987 * @rl_type: min or max
3988 * @bw: bandwidth in Kbps
3989 *
3990 * This function configures BW limit of VSI scheduling node based on TC
3991 * information.
3992 */
3993int
3994ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3995 enum ice_rl_type rl_type, u32 bw)
3996{
3997 int status;
3998
3999 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
4000 ICE_AGG_TYPE_VSI,
4001 tc, rl_type, bw);
4002 if (!status) {
4003 mutex_lock(&pi->sched_lock);
4004 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
4005 mutex_unlock(&pi->sched_lock);
4006 }
4007 return status;
4008}
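
/* Illustrative call (hypothetical handle, TC, and rate): limiting TC 2 of
 * VSI handle 1 to a 500000 Kbps max rate would be
 * ice_cfg_vsi_bw_lmt_per_tc(pi, 1, 2, ICE_MAX_BW, 500000); on success the
 * limit is also saved under the scheduler lock so it can be replayed later.
 */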
4009
4010/**
4011 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
4012 * @pi: port information structure
4013 * @vsi_handle: software VSI handle
4014 * @tc: traffic class
4015 * @rl_type: min or max
4016 *
4017 * This function configures default BW limit of VSI scheduling node based on TC
4018 * information.
4019 */
4020int
4021ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4022 enum ice_rl_type rl_type)
4023{
4024 int status;
4025
4026 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
4027 ICE_AGG_TYPE_VSI,
4028 tc, rl_type,
4029 ICE_SCHED_DFLT_BW);
4030 if (!status) {
4031 mutex_lock(&pi->sched_lock);
4032 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
4033 ICE_SCHED_DFLT_BW);
4034 mutex_unlock(&pi->sched_lock);
4035 }
4036 return status;
4037}
4038
4039/**
4040 * ice_cfg_rl_burst_size - Set burst size value
4041 * @hw: pointer to the HW struct
4042 * @bytes: burst size in bytes
4043 *
4044 * This function configures/sets the burst size to the requested new value. The
4045 * new burst size is used for future rate limit calls. It doesn't change the
4046 * existing or previously created RL profiles.
4047 */
4048int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
4049{
4050 u16 burst_size_to_prog;
4051
4052 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
4053 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
4054 return -EINVAL;
4055 if (ice_round_to_num(bytes, 64) <=
4056 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
4057 /* 64 byte granularity case */
4058 /* Disable MSB granularity bit */
4059 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
4060 /* round number to nearest 64 byte granularity */
4061 bytes = ice_round_to_num(bytes, 64);
4062 /* The value is in 64 byte chunks */
4063 burst_size_to_prog |= (u16)(bytes / 64);
4064 } else {
4065 /* k bytes granularity case */
4066 /* Enable MSB granularity bit */
4067 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
4068 /* round number to nearest 1024 granularity */
4069 bytes = ice_round_to_num(bytes, 1024);
4070 /* check rounding doesn't go beyond allowed */
4071 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
4072 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
4073 /* The value is in k bytes */
4074 burst_size_to_prog |= (u16)(bytes / 1024);
4075 }
4076 hw->max_burst_size = burst_size_to_prog;
4077 return 0;
4078}
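
/* Worked examples with hypothetical sizes, assuming they fall within the
 * respective granularity limits and that ice_round_to_num() rounds to the
 * nearest multiple as noted above: bytes = 8000 stays at 8000 (already a
 * 64 byte multiple) and is programmed as 8000 / 64 = 125 chunks with the
 * MSB granularity bit clear; bytes = 1000000 rounds to 1000448 (nearest
 * 1024) and is programmed as 1000448 / 1024 = 977 chunks with the MSB
 * granularity bit set.
 */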
4079
4080/**
4081 * ice_sched_replay_node_prio - re-configure node priority
4082 * @hw: pointer to the HW struct
4083 * @node: sched node to configure
4084 * @priority: priority value
4085 *
4086 * This function configures the node element's priority value. It
4087 * needs to be called with the scheduler lock held.
4088 */
4089static int
4090ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
4091 u8 priority)
4092{
4093 struct ice_aqc_txsched_elem_data buf;
4094 struct ice_aqc_txsched_elem *data;
4095 int status;
4096
4097 buf = node->info;
4098 data = &buf.data;
4099 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
4100 data->generic = priority;
4101
4102 /* Configure element */
4103 status = ice_sched_update_elem(hw, node, &buf);
4104 return status;
4105}
4106
4107/**
4108 * ice_sched_replay_node_bw - replay node(s) BW
4109 * @hw: pointer to the HW struct
4110 * @node: sched node to configure
4111 * @bw_t_info: BW type information
4112 *
4113 * This function restores node's BW from bw_t_info. The caller needs
4114 * to hold the scheduler lock.
4115 */
4116static int
4117ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
4118 struct ice_bw_type_info *bw_t_info)
4119{
4120 struct ice_port_info *pi = hw->port_info;
4121 int status = -EINVAL;
4122 u16 bw_alloc;
4123
4124 if (!node)
4125 return status;
4126 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
4127 return 0;
4128 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
4129 status = ice_sched_replay_node_prio(hw, node,
4130 bw_t_info->generic);
4131 if (status)
4132 return status;
4133 }
4134 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
4135 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
4136 bw_t_info->cir_bw.bw);
4137 if (status)
4138 return status;
4139 }
4140 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
4141 bw_alloc = bw_t_info->cir_bw.bw_alloc;
4142 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
4143 bw_alloc);
4144 if (status)
4145 return status;
4146 }
4147 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
4148 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
4149 bw_t_info->eir_bw.bw);
4150 if (status)
4151 return status;
4152 }
4153 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
4154 bw_alloc = bw_t_info->eir_bw.bw_alloc;
4155 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
4156 bw_alloc);
4157 if (status)
4158 return status;
4159 }
4160 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
4161 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
4162 bw_t_info->shared_bw);
4163 return status;
4164}
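
/* Note on the replay order above: priority is restored first, then CIR,
 * CIR weight, EIR, EIR weight, and finally shared BW; the first failure
 * aborts the sequence, so a node may be left partially restored if an
 * intermediate update fails.
 */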
4165
4166/**
4167 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
4168 * @pi: port info struct
4169 * @tc_bitmap: 8 bits TC bitmap to check
4170 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
4171 *
4172 * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TCs may
4173 * be missing, so only the TCs that are both requested and present are
4174 * returned. This function needs to be called with the scheduler lock held.
4175 */
4176static void
4177ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
4178 unsigned long *tc_bitmap,
4179 unsigned long *ena_tc_bitmap)
4180{
4181 u8 tc;
4182
4183 /* Some TC(s) may be missing after reset, adjust for replay */
4184 ice_for_each_traffic_class(tc)
4185 if (ice_is_tc_ena(*tc_bitmap, tc) &&
4186 (ice_sched_get_tc_node(pi, tc)))
4187 set_bit(tc, ena_tc_bitmap);
4188}
4189
4190/**
4191 * ice_sched_replay_agg - recreate aggregator node(s)
4192 * @hw: pointer to the HW struct
4193 *
4194 * This function recreates aggregator type nodes that were not replayed earlier.
4195 * It also replays aggregator BW information. These aggregator nodes are not
4196 * associated with VSI type nodes yet.
4197 */
4198void ice_sched_replay_agg(struct ice_hw *hw)
4199{
4200 struct ice_port_info *pi = hw->port_info;
4201 struct ice_sched_agg_info *agg_info;
4202
4203 mutex_lock(&pi->sched_lock);
4204 list_for_each_entry(agg_info, &hw->agg_list, list_entry)
4205 /* replay aggregator (re-create aggregator node) */
4206 if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
4207 ICE_MAX_TRAFFIC_CLASS)) {
4208 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4209 int status;
4210
4211 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4212 ice_sched_get_ena_tc_bitmap(pi,
4213 agg_info->replay_tc_bitmap,
4214 replay_bitmap);
4215 status = ice_sched_cfg_agg(hw->port_info,
4216 agg_info->agg_id,
4217 ICE_AGG_TYPE_AGG,
4218 replay_bitmap);
4219 if (status) {
4220 dev_info(ice_hw_to_dev(hw),
4221 "Replay agg id[%d] failed\n",
4222 agg_info->agg_id);
4223 /* Move on to next one */
4224 continue;
4225 }
4226 }
4227 mutex_unlock(&pi->sched_lock);
4228}
4229
4230/**
4231 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
4232 * @hw: pointer to the HW struct
4233 *
4234 * This function initializes the aggregator(s) TC bitmap to zero, a required
4235 * preinit step for replaying aggregators.
4236 */
4237void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
4238{
4239 struct ice_port_info *pi = hw->port_info;
4240 struct ice_sched_agg_info *agg_info;
4241
4242 mutex_lock(&pi->sched_lock);
4243 list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
4244 struct ice_sched_agg_vsi_info *agg_vsi_info;
4245
4246 agg_info->tc_bitmap[0] = 0;
4247 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
4248 list_entry)
4249 agg_vsi_info->tc_bitmap[0] = 0;
4250 }
4251 mutex_unlock(&pi->sched_lock);
4252}
4253
4254/**
4255 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
4256 * @hw: pointer to the HW struct
4257 * @vsi_handle: software VSI handle
4258 *
4259 * This function replays the aggregator node, VSI-to-aggregator association
4260 * nodes, and their node bandwidth information. This function needs to be
4261 * called with the scheduler lock held.
4262 */
4263static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
4264{
4265 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4266 struct ice_sched_agg_vsi_info *agg_vsi_info;
4267 struct ice_port_info *pi = hw->port_info;
4268 struct ice_sched_agg_info *agg_info;
4269 int status;
4270
4271 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4272 if (!ice_is_vsi_valid(hw, vsi_handle))
4273 return -EINVAL;
4274 agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
4275 if (!agg_info)
4276 return 0; /* Not present in list - default Agg case */
4277 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
4278 if (!agg_vsi_info)
4279 return 0; /* Not present in list - default Agg case */
4280 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
4281 replay_bitmap);
4282 /* Replay aggregator node associated to vsi_handle */
4283 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
4284 ICE_AGG_TYPE_AGG, replay_bitmap);
4285 if (status)
4286 return status;
4287
4288 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4289 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
4290 replay_bitmap);
4291 /* Move this VSI (vsi_handle) to above aggregator */
4292 return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
4293 replay_bitmap);
4294}
4295
4296/**
4297 * ice_replay_vsi_agg - replay VSI to aggregator node
4298 * @hw: pointer to the HW struct
4299 * @vsi_handle: software VSI handle
4300 *
4301 * This function replays the association of the VSI to aggregator type nodes,
4302 * and node bandwidth information.
4303 */
4304int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
4305{
4306 struct ice_port_info *pi = hw->port_info;
4307 int status;
4308
4309 mutex_lock(&pi->sched_lock);
4310 status = ice_sched_replay_vsi_agg(hw, vsi_handle);
4311 mutex_unlock(&pi->sched_lock);
4312 return status;
4313}
4314
4315/**
4316 * ice_sched_replay_q_bw - replay queue type node BW
4317 * @pi: port information structure
4318 * @q_ctx: queue context structure
4319 *
4320 * This function replays queue type node bandwidth. This function needs to be
4321 * called with the scheduler lock held.
4322 */
4323int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
4324{
4325 struct ice_sched_node *q_node;
4326
4327 /* Following also checks the presence of node in tree */
4328 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
4329 if (!q_node)
4330 return -EINVAL;
4331 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
4332}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_sched.h"
5
6/**
7 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
8 * @pi: port information structure
9 * @info: Scheduler element information from firmware
10 *
11 * This function inserts the root node of the scheduling tree topology
12 * to the SW DB.
13 */
14static enum ice_status
15ice_sched_add_root_node(struct ice_port_info *pi,
16 struct ice_aqc_txsched_elem_data *info)
17{
18 struct ice_sched_node *root;
19 struct ice_hw *hw;
20
21 if (!pi)
22 return ICE_ERR_PARAM;
23
24 hw = pi->hw;
25
26 root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
27 if (!root)
28 return ICE_ERR_NO_MEMORY;
29
30 /* coverity[suspicious_sizeof] */
31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
32 sizeof(*root), GFP_KERNEL);
33 if (!root->children) {
34 devm_kfree(ice_hw_to_dev(hw), root);
35 return ICE_ERR_NO_MEMORY;
36 }
37
38 memcpy(&root->info, info, sizeof(*info));
39 pi->root = root;
40 return 0;
41}
42
43/**
44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
46 * @teid: node TEID to search
47 *
48 * This function searches for a node matching the TEID in the scheduling tree
49 * from the SW DB. The search is recursive and is restricted by the number of
50 * layers it has searched through; stopping at the max supported layer.
51 *
52 * This function needs to be called when holding the port_info->sched_lock
53 */
54struct ice_sched_node *
55ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
56{
57 u16 i;
58
59 /* The TEID is same as that of the start_node */
60 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
61 return start_node;
62
63 /* The node has no children or is at the max layer */
64 if (!start_node->num_children ||
65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
67 return NULL;
68
69 /* Check if TEID matches to any of the children nodes */
70 for (i = 0; i < start_node->num_children; i++)
71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
72 return start_node->children[i];
73
74 /* Search within each child's sub-tree */
75 for (i = 0; i < start_node->num_children; i++) {
76 struct ice_sched_node *tmp;
77
78 tmp = ice_sched_find_node_by_teid(start_node->children[i],
79 teid);
80 if (tmp)
81 return tmp;
82 }
83
84 return NULL;
85}
86
87/**
88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
89 * @hw: pointer to the HW struct
90 * @cmd_opc: cmd opcode
91 * @elems_req: number of elements to request
92 * @buf: pointer to buffer
93 * @buf_size: buffer size in bytes
94 * @elems_resp: returns total number of elements response
95 * @cd: pointer to command details structure or NULL
96 *
97 * This function sends a scheduling elements cmd (cmd_opc)
98 */
99static enum ice_status
100ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
101 u16 elems_req, void *buf, u16 buf_size,
102 u16 *elems_resp, struct ice_sq_cd *cd)
103{
104 struct ice_aqc_sched_elem_cmd *cmd;
105 struct ice_aq_desc desc;
106 enum ice_status status;
107
108 cmd = &desc.params.sched_elem_cmd;
109 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
110 cmd->num_elem_req = cpu_to_le16(elems_req);
111 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 if (!status && elems_resp)
114 *elems_resp = le16_to_cpu(cmd->num_elem_resp);
115
116 return status;
117}
118
119/**
120 * ice_aq_query_sched_elems - query scheduler elements
121 * @hw: pointer to the HW struct
122 * @elems_req: number of elements to query
123 * @buf: pointer to buffer
124 * @buf_size: buffer size in bytes
125 * @elems_ret: returns total number of elements returned
126 * @cd: pointer to command details structure or NULL
127 *
128 * Query scheduling elements (0x0404)
129 */
130enum ice_status
131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
133 u16 *elems_ret, struct ice_sq_cd *cd)
134{
135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
136 elems_req, (void *)buf, buf_size,
137 elems_ret, cd);
138}
139
140/**
141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
142 * @pi: port information structure
143 * @layer: Scheduler layer of the node
144 * @info: Scheduler element information from firmware
145 *
146 * This function inserts a scheduler node to the SW DB.
147 */
148enum ice_status
149ice_sched_add_node(struct ice_port_info *pi, u8 layer,
150 struct ice_aqc_txsched_elem_data *info)
151{
152 struct ice_aqc_txsched_elem_data elem;
153 struct ice_sched_node *parent;
154 struct ice_sched_node *node;
155 enum ice_status status;
156 struct ice_hw *hw;
157
158 if (!pi)
159 return ICE_ERR_PARAM;
160
161 hw = pi->hw;
162
163 /* A valid parent node should be there */
164 parent = ice_sched_find_node_by_teid(pi->root,
165 le32_to_cpu(info->parent_teid));
166 if (!parent) {
167 ice_debug(hw, ICE_DBG_SCHED,
168 "Parent Node not found for parent_teid=0x%x\n",
169 le32_to_cpu(info->parent_teid));
170 return ICE_ERR_PARAM;
171 }
172
173 /* query the current node information from FW before adding it
174 * to the SW DB
175 */
176 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
177 if (status)
178 return status;
179
180 node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
181 if (!node)
182 return ICE_ERR_NO_MEMORY;
183 if (hw->max_children[layer]) {
184 /* coverity[suspicious_sizeof] */
185 node->children = devm_kcalloc(ice_hw_to_dev(hw),
186 hw->max_children[layer],
187 sizeof(*node), GFP_KERNEL);
188 if (!node->children) {
189 devm_kfree(ice_hw_to_dev(hw), node);
190 return ICE_ERR_NO_MEMORY;
191 }
192 }
193
194 node->in_use = true;
195 node->parent = parent;
196 node->tx_sched_layer = layer;
197 parent->children[parent->num_children++] = node;
198 node->info = elem;
199 return 0;
200}
201
202/**
203 * ice_aq_delete_sched_elems - delete scheduler elements
204 * @hw: pointer to the HW struct
205 * @grps_req: number of groups to delete
206 * @buf: pointer to buffer
207 * @buf_size: buffer size in bytes
208 * @grps_del: returns total number of elements deleted
209 * @cd: pointer to command details structure or NULL
210 *
211 * Delete scheduling elements (0x040F)
212 */
213static enum ice_status
214ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
215 struct ice_aqc_delete_elem *buf, u16 buf_size,
216 u16 *grps_del, struct ice_sq_cd *cd)
217{
218 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
219 grps_req, (void *)buf, buf_size,
220 grps_del, cd);
221}
222
223/**
224 * ice_sched_remove_elems - remove nodes from HW
225 * @hw: pointer to the HW struct
226 * @parent: pointer to the parent node
227 * @num_nodes: number of nodes
228 * @node_teids: array of node teids to be deleted
229 *
230 * This function remove nodes from HW
231 */
232static enum ice_status
233ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
234 u16 num_nodes, u32 *node_teids)
235{
236 struct ice_aqc_delete_elem *buf;
237 u16 i, num_groups_removed = 0;
238 enum ice_status status;
239 u16 buf_size;
240
241 buf_size = struct_size(buf, teid, num_nodes);
242 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
243 if (!buf)
244 return ICE_ERR_NO_MEMORY;
245
246 buf->hdr.parent_teid = parent->info.node_teid;
247 buf->hdr.num_elems = cpu_to_le16(num_nodes);
248 for (i = 0; i < num_nodes; i++)
249 buf->teid[i] = cpu_to_le32(node_teids[i]);
250
251 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
252 &num_groups_removed, NULL);
253 if (status || num_groups_removed != 1)
254 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
255 hw->adminq.sq_last_status);
256
257 devm_kfree(ice_hw_to_dev(hw), buf);
258 return status;
259}
260
261/**
262 * ice_sched_get_first_node - get the first node of the given layer
263 * @pi: port information structure
264 * @parent: pointer the base node of the subtree
265 * @layer: layer number
266 *
267 * This function retrieves the first node of the given layer from the subtree
268 */
269static struct ice_sched_node *
270ice_sched_get_first_node(struct ice_port_info *pi,
271 struct ice_sched_node *parent, u8 layer)
272{
273 return pi->sib_head[parent->tc_num][layer];
274}
275
276/**
277 * ice_sched_get_tc_node - get pointer to TC node
278 * @pi: port information structure
279 * @tc: TC number
280 *
281 * This function returns the TC node pointer
282 */
283struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
284{
285 u8 i;
286
287 if (!pi || !pi->root)
288 return NULL;
289 for (i = 0; i < pi->root->num_children; i++)
290 if (pi->root->children[i]->tc_num == tc)
291 return pi->root->children[i];
292 return NULL;
293}
294
295/**
296 * ice_free_sched_node - Free a Tx scheduler node from SW DB
297 * @pi: port information structure
298 * @node: pointer to the ice_sched_node struct
299 *
300 * This function frees up a node from SW DB as well as from HW
301 *
302 * This function needs to be called with the port_info->sched_lock held
303 */
304void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
305{
306 struct ice_sched_node *parent;
307 struct ice_hw *hw = pi->hw;
308 u8 i, j;
309
310 /* Free the children before freeing up the parent node
311 * The parent array is updated below and that shifts the nodes
312 * in the array. So always pick the first child if num children > 0
313 */
314 while (node->num_children)
315 ice_free_sched_node(pi, node->children[0]);
316
317 /* Leaf, TC and root nodes can't be deleted by SW */
318 if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
319 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
320 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
321 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
322 u32 teid = le32_to_cpu(node->info.node_teid);
323
324 ice_sched_remove_elems(hw, node->parent, 1, &teid);
325 }
326 parent = node->parent;
327 /* root has no parent */
328 if (parent) {
329 struct ice_sched_node *p;
330
331 /* update the parent */
332 for (i = 0; i < parent->num_children; i++)
333 if (parent->children[i] == node) {
334 for (j = i + 1; j < parent->num_children; j++)
335 parent->children[j - 1] =
336 parent->children[j];
337 parent->num_children--;
338 break;
339 }
340
341 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
342 while (p) {
343 if (p->sibling == node) {
344 p->sibling = node->sibling;
345 break;
346 }
347 p = p->sibling;
348 }
349
350 /* update the sibling head if head is getting removed */
351 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
352 pi->sib_head[node->tc_num][node->tx_sched_layer] =
353 node->sibling;
354 }
355
356 /* leaf nodes have no children */
357 if (node->children)
358 devm_kfree(ice_hw_to_dev(hw), node->children);
359 devm_kfree(ice_hw_to_dev(hw), node);
360}
361
362/**
363 * ice_aq_get_dflt_topo - gets default scheduler topology
364 * @hw: pointer to the HW struct
365 * @lport: logical port number
366 * @buf: pointer to buffer
367 * @buf_size: buffer size in bytes
368 * @num_branches: returns total number of queue to port branches
369 * @cd: pointer to command details structure or NULL
370 *
371 * Get default scheduler topology (0x400)
372 */
373static enum ice_status
374ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
375 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
376 u8 *num_branches, struct ice_sq_cd *cd)
377{
378 struct ice_aqc_get_topo *cmd;
379 struct ice_aq_desc desc;
380 enum ice_status status;
381
382 cmd = &desc.params.get_topo;
383 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
384 cmd->port_num = lport;
385 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
386 if (!status && num_branches)
387 *num_branches = cmd->num_branches;
388
389 return status;
390}
391
392/**
393 * ice_aq_add_sched_elems - adds scheduling element
394 * @hw: pointer to the HW struct
395 * @grps_req: the number of groups that are requested to be added
396 * @buf: pointer to buffer
397 * @buf_size: buffer size in bytes
398 * @grps_added: returns total number of groups added
399 * @cd: pointer to command details structure or NULL
400 *
401 * Add scheduling elements (0x0401)
402 */
403static enum ice_status
404ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
405 struct ice_aqc_add_elem *buf, u16 buf_size,
406 u16 *grps_added, struct ice_sq_cd *cd)
407{
408 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
409 grps_req, (void *)buf, buf_size,
410 grps_added, cd);
411}
412
413/**
414 * ice_aq_cfg_sched_elems - configures scheduler elements
415 * @hw: pointer to the HW struct
416 * @elems_req: number of elements to configure
417 * @buf: pointer to buffer
418 * @buf_size: buffer size in bytes
419 * @elems_cfgd: returns total number of elements configured
420 * @cd: pointer to command details structure or NULL
421 *
422 * Configure scheduling elements (0x0403)
423 */
424static enum ice_status
425ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
426 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
427 u16 *elems_cfgd, struct ice_sq_cd *cd)
428{
429 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
430 elems_req, (void *)buf, buf_size,
431 elems_cfgd, cd);
432}
433
434/**
435 * ice_aq_suspend_sched_elems - suspend scheduler elements
436 * @hw: pointer to the HW struct
437 * @elems_req: number of elements to suspend
438 * @buf: pointer to buffer
439 * @buf_size: buffer size in bytes
440 * @elems_ret: returns total number of elements suspended
441 * @cd: pointer to command details structure or NULL
442 *
443 * Suspend scheduling elements (0x0409)
444 */
445static enum ice_status
446ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
447 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
448{
449 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
450 elems_req, (void *)buf, buf_size,
451 elems_ret, cd);
452}
453
454/**
455 * ice_aq_resume_sched_elems - resume scheduler elements
456 * @hw: pointer to the HW struct
457 * @elems_req: number of elements to resume
458 * @buf: pointer to buffer
459 * @buf_size: buffer size in bytes
460 * @elems_ret: returns total number of elements resumed
461 * @cd: pointer to command details structure or NULL
462 *
463 * resume scheduling elements (0x040A)
464 */
465static enum ice_status
466ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
467 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
468{
469 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
470 elems_req, (void *)buf, buf_size,
471 elems_ret, cd);
472}
473
474/**
475 * ice_aq_query_sched_res - query scheduler resource
476 * @hw: pointer to the HW struct
477 * @buf_size: buffer size in bytes
478 * @buf: pointer to buffer
479 * @cd: pointer to command details structure or NULL
480 *
481 * Query scheduler resource allocation (0x0412)
482 */
483static enum ice_status
484ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
485 struct ice_aqc_query_txsched_res_resp *buf,
486 struct ice_sq_cd *cd)
487{
488 struct ice_aq_desc desc;
489
490 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
491 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
492}
493
494/**
495 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
496 * @hw: pointer to the HW struct
497 * @num_nodes: number of nodes
498 * @node_teids: array of node teids to be suspended or resumed
499 * @suspend: true means suspend / false means resume
500 *
501 * This function suspends or resumes HW nodes
502 */
503static enum ice_status
504ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
505 bool suspend)
506{
507 u16 i, buf_size, num_elem_ret = 0;
508 enum ice_status status;
509 __le32 *buf;
510
511 buf_size = sizeof(*buf) * num_nodes;
512 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
513 if (!buf)
514 return ICE_ERR_NO_MEMORY;
515
516 for (i = 0; i < num_nodes; i++)
517 buf[i] = cpu_to_le32(node_teids[i]);
518
519 if (suspend)
520 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
521 buf_size, &num_elem_ret,
522 NULL);
523 else
524 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
525 buf_size, &num_elem_ret,
526 NULL);
527 if (status || num_elem_ret != num_nodes)
528 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
529
530 devm_kfree(ice_hw_to_dev(hw), buf);
531 return status;
532}
533
534/**
535 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
536 * @hw: pointer to the HW struct
537 * @vsi_handle: VSI handle
538 * @tc: TC number
539 * @new_numqs: number of queues
540 */
541static enum ice_status
542ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
543{
544 struct ice_vsi_ctx *vsi_ctx;
545 struct ice_q_ctx *q_ctx;
546
547 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
548 if (!vsi_ctx)
549 return ICE_ERR_PARAM;
550 /* allocate LAN queue contexts */
551 if (!vsi_ctx->lan_q_ctx[tc]) {
552 vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
553 new_numqs,
554 sizeof(*q_ctx),
555 GFP_KERNEL);
556 if (!vsi_ctx->lan_q_ctx[tc])
557 return ICE_ERR_NO_MEMORY;
558 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
559 return 0;
560 }
561 /* num queues are increased, update the queue contexts */
562 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
563 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
564
565 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
566 sizeof(*q_ctx), GFP_KERNEL);
567 if (!q_ctx)
568 return ICE_ERR_NO_MEMORY;
569 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
570 prev_num * sizeof(*q_ctx));
571 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
572 vsi_ctx->lan_q_ctx[tc] = q_ctx;
573 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
574 }
575 return 0;
576}
577
578/**
579 * ice_aq_rl_profile - performs a rate limiting task
580 * @hw: pointer to the HW struct
581 * @opcode: opcode for add, query, or remove profile(s)
582 * @num_profiles: the number of profiles
583 * @buf: pointer to buffer
584 * @buf_size: buffer size in bytes
585 * @num_processed: number of processed add or remove profile(s) to return
586 * @cd: pointer to command details structure
587 *
588 * RL profile function to add, query, or remove profile(s)
589 */
590static enum ice_status
591ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
592 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
593 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
594{
595 struct ice_aqc_rl_profile *cmd;
596 struct ice_aq_desc desc;
597 enum ice_status status;
598
599 cmd = &desc.params.rl_profile;
600
601 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
602 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
603 cmd->num_profiles = cpu_to_le16(num_profiles);
604 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
605 if (!status && num_processed)
606 *num_processed = le16_to_cpu(cmd->num_processed);
607 return status;
608}
609
610/**
611 * ice_aq_add_rl_profile - adds rate limiting profile(s)
612 * @hw: pointer to the HW struct
613 * @num_profiles: the number of profile(s) to be add
614 * @buf: pointer to buffer
615 * @buf_size: buffer size in bytes
616 * @num_profiles_added: total number of profiles added to return
617 * @cd: pointer to command details structure
618 *
619 * Add RL profile (0x0410)
620 */
621static enum ice_status
622ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
623 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
624 u16 *num_profiles_added, struct ice_sq_cd *cd)
625{
626 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
627 buf, buf_size, num_profiles_added, cd);
628}
629
630/**
631 * ice_aq_remove_rl_profile - removes RL profile(s)
632 * @hw: pointer to the HW struct
633 * @num_profiles: the number of profile(s) to remove
634 * @buf: pointer to buffer
635 * @buf_size: buffer size in bytes
636 * @num_profiles_removed: total number of profiles removed to return
637 * @cd: pointer to command details structure or NULL
638 *
639 * Remove RL profile (0x0415)
640 */
641static enum ice_status
642ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
643 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
644 u16 *num_profiles_removed, struct ice_sq_cd *cd)
645{
646 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
647 num_profiles, buf, buf_size,
648 num_profiles_removed, cd);
649}
650
651/**
652 * ice_sched_del_rl_profile - remove RL profile
653 * @hw: pointer to the HW struct
654 * @rl_info: rate limit profile information
655 *
656 * If the profile ID is not referenced anymore, it removes profile ID with
657 * its associated parameters from HW DB,and locally. The caller needs to
658 * hold scheduler lock.
659 */
660static enum ice_status
661ice_sched_del_rl_profile(struct ice_hw *hw,
662 struct ice_aqc_rl_profile_info *rl_info)
663{
664 struct ice_aqc_rl_profile_elem *buf;
665 u16 num_profiles_removed;
666 enum ice_status status;
667 u16 num_profiles = 1;
668
669 if (rl_info->prof_id_ref != 0)
670 return ICE_ERR_IN_USE;
671
672 /* Safe to remove profile ID */
673 buf = &rl_info->profile;
674 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
675 &num_profiles_removed, NULL);
676 if (status || num_profiles_removed != num_profiles)
677 return ICE_ERR_CFG;
678
679 /* Delete stale entry now */
680 list_del(&rl_info->list_entry);
681 devm_kfree(ice_hw_to_dev(hw), rl_info);
682 return status;
683}
684
685/**
686 * ice_sched_clear_rl_prof - clears RL prof entries
687 * @pi: port information structure
688 *
689 * This function removes all RL profile from HW as well as from SW DB.
690 */
691static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
692{
693 u16 ln;
694
695 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
696 struct ice_aqc_rl_profile_info *rl_prof_elem;
697 struct ice_aqc_rl_profile_info *rl_prof_tmp;
698
699 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
700 &pi->rl_prof_list[ln], list_entry) {
701 struct ice_hw *hw = pi->hw;
702 enum ice_status status;
703
704 rl_prof_elem->prof_id_ref = 0;
705 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
706 if (status) {
707 ice_debug(hw, ICE_DBG_SCHED,
708 "Remove rl profile failed\n");
709 /* On error, free mem required */
710 list_del(&rl_prof_elem->list_entry);
711 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
712 }
713 }
714 }
715}
716
717/**
718 * ice_sched_clear_agg - clears the aggregator related information
719 * @hw: pointer to the hardware structure
720 *
721 * This function removes aggregator list and free up aggregator related memory
722 * previously allocated.
723 */
724void ice_sched_clear_agg(struct ice_hw *hw)
725{
726 struct ice_sched_agg_info *agg_info;
727 struct ice_sched_agg_info *atmp;
728
729 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
730 struct ice_sched_agg_vsi_info *agg_vsi_info;
731 struct ice_sched_agg_vsi_info *vtmp;
732
733 list_for_each_entry_safe(agg_vsi_info, vtmp,
734 &agg_info->agg_vsi_list, list_entry) {
735 list_del(&agg_vsi_info->list_entry);
736 devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
737 }
738 list_del(&agg_info->list_entry);
739 devm_kfree(ice_hw_to_dev(hw), agg_info);
740 }
741}
742
743/**
744 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
745 * @pi: port information structure
746 *
747 * This function removes all the nodes from HW as well as from SW DB.
748 */
749static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
750{
751 if (!pi)
752 return;
753 /* remove RL profiles related lists */
754 ice_sched_clear_rl_prof(pi);
755 if (pi->root) {
756 ice_free_sched_node(pi, pi->root);
757 pi->root = NULL;
758 }
759}
760
761/**
762 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
763 * @pi: port information structure
764 *
765 * Cleanup scheduling elements from SW DB
766 */
767void ice_sched_clear_port(struct ice_port_info *pi)
768{
769 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
770 return;
771
772 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
773 mutex_lock(&pi->sched_lock);
774 ice_sched_clear_tx_topo(pi);
775 mutex_unlock(&pi->sched_lock);
776 mutex_destroy(&pi->sched_lock);
777}
778
779/**
780 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
781 * @hw: pointer to the HW struct
782 *
783 * Cleanup scheduling elements from SW DB for all the ports
784 */
785void ice_sched_cleanup_all(struct ice_hw *hw)
786{
787 if (!hw)
788 return;
789
790 if (hw->layer_info) {
791 devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
792 hw->layer_info = NULL;
793 }
794
795 ice_sched_clear_port(hw->port_info);
796
797 hw->num_tx_sched_layers = 0;
798 hw->num_tx_sched_phys_layers = 0;
799 hw->flattened_layers = 0;
800 hw->max_cgds = 0;
801}
802
803/**
804 * ice_sched_add_elems - add nodes to HW and SW DB
805 * @pi: port information structure
806 * @tc_node: pointer to the branch node
807 * @parent: pointer to the parent node
808 * @layer: layer number to add nodes
809 * @num_nodes: number of nodes
810 * @num_nodes_added: pointer to num nodes added
811 * @first_node_teid: if new nodes are added then return the TEID of first node
812 *
813 * This function add nodes to HW as well as to SW DB for a given layer
814 */
815static enum ice_status
816ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
817 struct ice_sched_node *parent, u8 layer, u16 num_nodes,
818 u16 *num_nodes_added, u32 *first_node_teid)
819{
820 struct ice_sched_node *prev, *new_node;
821 struct ice_aqc_add_elem *buf;
822 u16 i, num_groups_added = 0;
823 enum ice_status status = 0;
824 struct ice_hw *hw = pi->hw;
825 size_t buf_size;
826 u32 teid;
827
828 buf_size = struct_size(buf, generic, num_nodes);
829 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
830 if (!buf)
831 return ICE_ERR_NO_MEMORY;
832
833 buf->hdr.parent_teid = parent->info.node_teid;
834 buf->hdr.num_elems = cpu_to_le16(num_nodes);
835 for (i = 0; i < num_nodes; i++) {
836 buf->generic[i].parent_teid = parent->info.node_teid;
837 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
838 buf->generic[i].data.valid_sections =
839 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
840 ICE_AQC_ELEM_VALID_EIR;
841 buf->generic[i].data.generic = 0;
842 buf->generic[i].data.cir_bw.bw_profile_idx =
843 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
844 buf->generic[i].data.cir_bw.bw_alloc =
845 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
846 buf->generic[i].data.eir_bw.bw_profile_idx =
847 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
848 buf->generic[i].data.eir_bw.bw_alloc =
849 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
850 }
851
852 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
853 &num_groups_added, NULL);
854 if (status || num_groups_added != 1) {
855 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
856 hw->adminq.sq_last_status);
857 devm_kfree(ice_hw_to_dev(hw), buf);
858 return ICE_ERR_CFG;
859 }
860
861 *num_nodes_added = num_nodes;
862 /* add nodes to the SW DB */
863 for (i = 0; i < num_nodes; i++) {
864 status = ice_sched_add_node(pi, layer, &buf->generic[i]);
865 if (status) {
866 ice_debug(hw, ICE_DBG_SCHED,
867 "add nodes in SW DB failed status =%d\n",
868 status);
869 break;
870 }
871
872 teid = le32_to_cpu(buf->generic[i].node_teid);
873 new_node = ice_sched_find_node_by_teid(parent, teid);
874 if (!new_node) {
875 ice_debug(hw, ICE_DBG_SCHED,
876 "Node is missing for teid =%d\n", teid);
877 break;
878 }
879
880 new_node->sibling = NULL;
881 new_node->tc_num = tc_node->tc_num;
882
883 /* add it to previous node sibling pointer */
884 /* Note: siblings are not linked across branches */
885 prev = ice_sched_get_first_node(pi, tc_node, layer);
886 if (prev && prev != new_node) {
887 while (prev->sibling)
888 prev = prev->sibling;
889 prev->sibling = new_node;
890 }
891
892 /* initialize the sibling head */
893 if (!pi->sib_head[tc_node->tc_num][layer])
894 pi->sib_head[tc_node->tc_num][layer] = new_node;
895
896 if (i == 0)
897 *first_node_teid = teid;
898 }
899
900 devm_kfree(ice_hw_to_dev(hw), buf);
901 return status;
902}
903
904/**
905 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
906 * @pi: port information structure
907 * @tc_node: pointer to TC node
908 * @parent: pointer to parent node
909 * @layer: layer number to add nodes
910 * @num_nodes: number of nodes to be added
911 * @first_node_teid: pointer to the first node TEID
912 * @num_nodes_added: pointer to number of nodes added
913 *
914 * This function add nodes to a given layer.
915 */
916static enum ice_status
917ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
918 struct ice_sched_node *tc_node,
919 struct ice_sched_node *parent, u8 layer,
920 u16 num_nodes, u32 *first_node_teid,
921 u16 *num_nodes_added)
922{
923 u32 *first_teid_ptr = first_node_teid;
924 u16 new_num_nodes, max_child_nodes;
925 enum ice_status status = 0;
926 struct ice_hw *hw = pi->hw;
927 u16 num_added = 0;
928 u32 temp;
929
930 *num_nodes_added = 0;
931
932 if (!num_nodes)
933 return status;
934
935 if (!parent || layer < hw->sw_entry_point_layer)
936 return ICE_ERR_PARAM;
937
938 /* max children per node per layer */
939 max_child_nodes = hw->max_children[parent->tx_sched_layer];
940
941 /* current number of children + required nodes exceed max children ? */
942 if ((parent->num_children + num_nodes) > max_child_nodes) {
943 /* Fail if the parent is a TC node */
944 if (parent == tc_node)
945 return ICE_ERR_CFG;
946
947 /* utilize all the spaces if the parent is not full */
948 if (parent->num_children < max_child_nodes) {
949 new_num_nodes = max_child_nodes - parent->num_children;
950 /* this recursion is intentional, and wouldn't
951 * go more than 2 calls
952 */
953 status = ice_sched_add_nodes_to_layer(pi, tc_node,
954 parent, layer,
955 new_num_nodes,
956 first_node_teid,
957 &num_added);
958 if (status)
959 return status;
960
961 *num_nodes_added += num_added;
962 }
963 /* Don't modify the first node TEID memory if the first node was
964 * added already in the above call. Instead send some temp
965 * memory for all other recursive calls.
966 */
967 if (num_added)
968 first_teid_ptr = &temp;
969
970 new_num_nodes = num_nodes - num_added;
971
972 /* This parent is full, try the next sibling */
973 parent = parent->sibling;
974
975 /* this recursion is intentional, for 1024 queues
976 * per VSI, it goes max of 16 iterations.
977 * 1024 / 8 = 128 layer 8 nodes
978 * 128 /8 = 16 (add 8 nodes per iteration)
979 */
980 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
981 layer, new_num_nodes,
982 first_teid_ptr,
983 &num_added);
984 *num_nodes_added += num_added;
985 return status;
986 }
987
988 status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
989 num_nodes_added, first_node_teid);
990 return status;
991}
992
993/**
994 * ice_sched_get_qgrp_layer - get the current queue group layer number
995 * @hw: pointer to the HW struct
996 *
997 * This function returns the current queue group layer number
998 */
999static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1000{
1001 /* It's always total layers - 1, the array is 0 relative so -2 */
1002 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1003}
1004
1005/**
1006 * ice_sched_get_vsi_layer - get the current VSI layer number
1007 * @hw: pointer to the HW struct
1008 *
1009 * This function returns the current VSI layer number
1010 */
1011static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1012{
1013 /* Num Layers VSI layer
1014 * 9 6
1015 * 7 4
1016 * 5 or less sw_entry_point_layer
1017 */
1018 /* calculate the VSI layer based on number of layers. */
1019 if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
1020 u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1021
1022 if (layer > hw->sw_entry_point_layer)
1023 return layer;
1024 }
1025 return hw->sw_entry_point_layer;
1026}
1027
1028/**
1029 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1030 * @pi: port information structure
1031 *
1032 * This function removes the leaf node that was created by the FW
1033 * during initialization
1034 */
1035static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1036{
1037 struct ice_sched_node *node;
1038
1039 node = pi->root;
1040 while (node) {
1041 if (!node->num_children)
1042 break;
1043 node = node->children[0];
1044 }
1045 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1046 u32 teid = le32_to_cpu(node->info.node_teid);
1047 enum ice_status status;
1048
1049 /* remove the default leaf node */
1050 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1051 if (!status)
1052 ice_free_sched_node(pi, node);
1053 }
1054}
1055
1056/**
1057 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1058 * @pi: port information structure
1059 *
1060 * This function frees all the nodes except root and TC that were created by
1061 * the FW during initialization
1062 */
1063static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1064{
1065 struct ice_sched_node *node;
1066
1067 ice_rm_dflt_leaf_node(pi);
1068
1069 /* remove the default nodes except TC and root nodes */
1070 node = pi->root;
1071 while (node) {
1072 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1073 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1074 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1075 ice_free_sched_node(pi, node);
1076 break;
1077 }
1078
1079 if (!node->num_children)
1080 break;
1081 node = node->children[0];
1082 }
1083}
1084
1085/**
1086 * ice_sched_init_port - Initialize scheduler by querying information from FW
1087 * @pi: port info structure for the tree to cleanup
1088 *
1089 * This function is the initial call to find the total number of Tx scheduler
1090 * resources, default topology created by firmware and storing the information
1091 * in SW DB.
1092 */
1093enum ice_status ice_sched_init_port(struct ice_port_info *pi)
1094{
1095 struct ice_aqc_get_topo_elem *buf;
1096 enum ice_status status;
1097 struct ice_hw *hw;
1098 u8 num_branches;
1099 u16 num_elems;
1100 u8 i, j;
1101
1102 if (!pi)
1103 return ICE_ERR_PARAM;
1104 hw = pi->hw;
1105
1106 /* Query the Default Topology from FW */
1107 buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
1108 if (!buf)
1109 return ICE_ERR_NO_MEMORY;
1110
1111 /* Query default scheduling tree topology */
1112 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1113 &num_branches, NULL);
1114 if (status)
1115 goto err_init_port;
1116
1117 /* num_branches should be between 1-8 */
1118 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1119 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1120 num_branches);
1121 status = ICE_ERR_PARAM;
1122 goto err_init_port;
1123 }
1124
1125 /* get the number of elements on the default/first branch */
1126 num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1127
1128 /* num_elems should always be between 1-9 */
1129 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1130 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1131 num_elems);
1132 status = ICE_ERR_PARAM;
1133 goto err_init_port;
1134 }
1135
1136 /* If the last node is a leaf node then the index of the queue group
1137 * layer is two less than the number of elements.
1138 */
1139 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1140 ICE_AQC_ELEM_TYPE_LEAF)
1141 pi->last_node_teid =
1142 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1143 else
1144 pi->last_node_teid =
1145 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1146
1147 /* Insert the Tx Sched root node */
1148 status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1149 if (status)
1150 goto err_init_port;
1151
1152 /* Parse the default tree and cache the information */
1153 for (i = 0; i < num_branches; i++) {
1154 num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1155
1156 /* Skip root element as already inserted */
1157 for (j = 1; j < num_elems; j++) {
1158 /* update the sw entry point */
1159 if (buf[0].generic[j].data.elem_type ==
1160 ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1161 hw->sw_entry_point_layer = j;
1162
1163 status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1164 if (status)
1165 goto err_init_port;
1166 }
1167 }
1168
1169 /* Remove the default nodes. */
1170 if (pi->root)
1171 ice_sched_rm_dflt_nodes(pi);
1172
1173 /* initialize the port for handling the scheduler tree */
1174 pi->port_state = ICE_SCHED_PORT_STATE_READY;
1175 mutex_init(&pi->sched_lock);
1176 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1177 INIT_LIST_HEAD(&pi->rl_prof_list[i]);
1178
1179err_init_port:
1180 if (status && pi->root) {
1181 ice_free_sched_node(pi, pi->root);
1182 pi->root = NULL;
1183 }
1184
1185 devm_kfree(ice_hw_to_dev(hw), buf);
1186 return status;
1187}
1188
1189/**
1190 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1191 * @hw: pointer to the HW struct
1192 *
1193 * query FW for allocated scheduler resources and store in HW struct
1194 */
1195enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1196{
1197 struct ice_aqc_query_txsched_res_resp *buf;
1198 enum ice_status status = 0;
1199 __le16 max_sibl;
1200 u16 i;
1201
1202 if (hw->layer_info)
1203 return status;
1204
1205 buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1206 if (!buf)
1207 return ICE_ERR_NO_MEMORY;
1208
1209 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1210 if (status)
1211 goto sched_query_out;
1212
1213 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1214 hw->num_tx_sched_phys_layers =
1215 le16_to_cpu(buf->sched_props.phys_levels);
1216 hw->flattened_layers = buf->sched_props.flattening_bitmap;
1217 hw->max_cgds = buf->sched_props.max_pf_cgds;
1218
	/* The max sibling group size of a layer is the max number of children
	 * that a node in the layer above it may have:
	 * layer 1 node max children = layer 2 max sibling group size
	 * layer 2 node max children = layer 3 max sibling group size
	 * and so on. This array is populated from the root (index 0) down to
	 * the qgroup layer 7. Leaf nodes have no children.
1225 */
1226 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1227 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1228 hw->max_children[i] = le16_to_cpu(max_sibl);
1229 }
1230
1231 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1232 (hw->num_tx_sched_layers *
1233 sizeof(*hw->layer_info)),
1234 GFP_KERNEL);
1235 if (!hw->layer_info) {
1236 status = ICE_ERR_NO_MEMORY;
1237 goto sched_query_out;
1238 }
1239
1240sched_query_out:
1241 devm_kfree(ice_hw_to_dev(hw), buf);
1242 return status;
1243}
1244
1245/**
1246 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1247 * @hw: pointer to the HW struct
1248 * @base: pointer to the base node
1249 * @node: pointer to the node to search
1250 *
1251 * This function checks whether a given node is part of the base node
1252 * subtree or not
1253 */
1254static bool
1255ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1256 struct ice_sched_node *node)
1257{
1258 u8 i;
1259
1260 for (i = 0; i < base->num_children; i++) {
1261 struct ice_sched_node *child = base->children[i];
1262
1263 if (node == child)
1264 return true;
1265
1266 if (child->tx_sched_layer > node->tx_sched_layer)
1267 return false;
1268
		/* this recursion is intentional, and won't
		 * go deeper than 8 calls
1271 */
1272 if (ice_sched_find_node_in_subtree(hw, child, node))
1273 return true;
1274 }
1275 return false;
1276}
1277
1278/**
1279 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
1280 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
1282 * @qgrp_node: first queue group node identified for scanning
1283 * @owner: LAN or RDMA
1284 *
1285 * This function retrieves a free LAN or RDMA queue group node by scanning
1286 * qgrp_node and its siblings for the queue group with the fewest number
1287 * of queues currently assigned.
1288 */
1289static struct ice_sched_node *
1290ice_sched_get_free_qgrp(struct ice_port_info *pi,
1291 struct ice_sched_node *vsi_node,
1292 struct ice_sched_node *qgrp_node, u8 owner)
1293{
1294 struct ice_sched_node *min_qgrp;
1295 u8 min_children;
1296
1297 if (!qgrp_node)
1298 return qgrp_node;
1299 min_children = qgrp_node->num_children;
1300 if (!min_children)
1301 return qgrp_node;
1302 min_qgrp = qgrp_node;
	/* scan all queue groups until we find one with fewer children than the
	 * current minimum. This way queues are spread evenly across the queue
	 * group nodes and all of them stay active, so bandwidth is distributed
	 * equally across all queues.
1307 */
1308 while (qgrp_node) {
1309 /* make sure the qgroup node is part of the VSI subtree */
1310 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1311 if (qgrp_node->num_children < min_children &&
1312 qgrp_node->owner == owner) {
				/* remember the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children */
1317 if (!min_children)
1318 break;
1319 }
1320 qgrp_node = qgrp_node->sibling;
1321 }
1322 return min_qgrp;
1323}
1324
1325/**
1326 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1327 * @pi: port information structure
1328 * @vsi_handle: software VSI handle
1329 * @tc: branch number
1330 * @owner: LAN or RDMA
1331 *
1332 * This function retrieves a free LAN or RDMA queue group node
1333 */
1334struct ice_sched_node *
1335ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1336 u8 owner)
1337{
1338 struct ice_sched_node *vsi_node, *qgrp_node;
1339 struct ice_vsi_ctx *vsi_ctx;
1340 u16 max_children;
1341 u8 qgrp_layer;
1342
1343 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1344 max_children = pi->hw->max_children[qgrp_layer];
1345
1346 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1347 if (!vsi_ctx)
1348 return NULL;
1349 vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* bail out if there is no VSI node for this TC */
1351 if (!vsi_node)
1352 return NULL;
1353
1354 /* get the first queue group node from VSI sub-tree */
1355 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1356 while (qgrp_node) {
1357 /* make sure the qgroup node is part of the VSI subtree */
1358 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1359 if (qgrp_node->num_children < max_children &&
1360 qgrp_node->owner == owner)
1361 break;
1362 qgrp_node = qgrp_node->sibling;
1363 }
1364
1365 /* Select the best queue group */
1366 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
1367}
1368
1369/**
1370 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1371 * @hw: pointer to the HW struct
1372 * @tc_node: pointer to the TC node
1373 * @vsi_handle: software VSI handle
1374 *
1375 * This function retrieves a VSI node for a given VSI ID from a given
1376 * TC branch
1377 */
1378static struct ice_sched_node *
1379ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
1380 u16 vsi_handle)
1381{
1382 struct ice_sched_node *node;
1383 u8 vsi_layer;
1384
1385 vsi_layer = ice_sched_get_vsi_layer(hw);
1386 node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
1387
1388 /* Check whether it already exists */
1389 while (node) {
1390 if (node->vsi_handle == vsi_handle)
1391 return node;
1392 node = node->sibling;
1393 }
1394
1395 return node;
1396}
1397
1398/**
1399 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1400 * @hw: pointer to the HW struct
1401 * @num_qs: number of queues
1402 * @num_nodes: num nodes array
1403 *
1404 * This function calculates the number of VSI child nodes based on the
1405 * number of queues.
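 *
 * As an illustrative example (numbers are not from the hardware): with 128
 * queues and 8 children allowed per queue group node, the queue group layer
 * needs DIV_ROUND_UP(128, 8) = 16 nodes; the layer above it then needs
 * DIV_ROUND_UP(16, max_children) nodes, and so on up to (but not including)
 * the VSI layer.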
1406 */
1407static void
1408ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1409{
1410 u16 num = num_qs;
1411 u8 i, qgl, vsil;
1412
1413 qgl = ice_sched_get_qgrp_layer(hw);
1414 vsil = ice_sched_get_vsi_layer(hw);
1415
1416 /* calculate num nodes from queue group to VSI layer */
1417 for (i = qgl; i > vsil; i--) {
1418 /* round to the next integer if there is a remainder */
1419 num = DIV_ROUND_UP(num, hw->max_children[i]);
1420
1421 /* need at least one node */
1422 num_nodes[i] = num ? num : 1;
1423 }
1424}
1425
1426/**
1427 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1428 * @pi: port information structure
1429 * @vsi_handle: software VSI handle
1430 * @tc_node: pointer to the TC node
1431 * @num_nodes: pointer to the num nodes that needs to be added per layer
1432 * @owner: node owner (LAN or RDMA)
1433 *
1434 * This function adds the VSI child nodes to tree. It gets called for
1435 * LAN and RDMA separately.
1436 */
1437static enum ice_status
1438ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1439 struct ice_sched_node *tc_node, u16 *num_nodes,
1440 u8 owner)
1441{
1442 struct ice_sched_node *parent, *node;
1443 struct ice_hw *hw = pi->hw;
1444 enum ice_status status;
1445 u32 first_node_teid;
1446 u16 num_added = 0;
1447 u8 i, qgl, vsil;
1448
1449 qgl = ice_sched_get_qgrp_layer(hw);
1450 vsil = ice_sched_get_vsi_layer(hw);
1451 parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1452 for (i = vsil + 1; i <= qgl; i++) {
1453 if (!parent)
1454 return ICE_ERR_CFG;
1455
1456 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1457 num_nodes[i],
1458 &first_node_teid,
1459 &num_added);
1460 if (status || num_nodes[i] != num_added)
1461 return ICE_ERR_CFG;
1462
1463 /* The newly added node can be a new parent for the next
1464 * layer nodes
1465 */
1466 if (num_added) {
1467 parent = ice_sched_find_node_by_teid(tc_node,
1468 first_node_teid);
1469 node = parent;
1470 while (node) {
1471 node->owner = owner;
1472 node = node->sibling;
1473 }
1474 } else {
1475 parent = parent->children[0];
1476 }
1477 }
1478
1479 return 0;
1480}
1481
1482/**
1483 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1484 * @hw: pointer to the HW struct
1485 * @tc_node: pointer to TC node
1486 * @num_nodes: pointer to num nodes array
1487 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into the Tx tree, including the VSI node, its parent and the
 * intermediate nodes in the layers between the SW entry point and the VSI
 * layer
1491 */
1492static void
1493ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
1494 struct ice_sched_node *tc_node, u16 *num_nodes)
1495{
1496 struct ice_sched_node *node;
1497 u8 vsil;
1498 int i;
1499
1500 vsil = ice_sched_get_vsi_layer(hw);
1501 for (i = vsil; i >= hw->sw_entry_point_layer; i--)
		/* If the TC has no children yet, a node is needed at every
		 * layer; the VSI layer itself always needs at least one node.
1504 */
1505 if (!tc_node->num_children || i == vsil) {
1506 num_nodes[i]++;
1507 } else {
			/* If the intermediate nodes have reached their max
			 * number of children, add a new one.
1510 */
1511 node = ice_sched_get_first_node(hw->port_info, tc_node,
1512 (u8)i);
1513 /* scan all the siblings */
1514 while (node) {
1515 if (node->num_children < hw->max_children[i])
1516 break;
1517 node = node->sibling;
1518 }
1519
			/* The tree has an intermediate node that can accept
			 * this new VSI, so there is no need to calculate
			 * supported nodes for the remaining layers.
1523 */
1524 if (node)
1525 break;
1526 /* all the nodes are full, allocate a new one */
1527 num_nodes[i]++;
1528 }
1529}
1530
1531/**
1532 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1533 * @pi: port information structure
1534 * @vsi_handle: software VSI handle
1535 * @tc_node: pointer to TC node
1536 * @num_nodes: pointer to num nodes array
1537 *
 * This function adds the VSI supported nodes into the Tx tree, including the
 * VSI node, its parent and the intermediate nodes in the layers between the
 * SW entry point and the VSI layer
1540 */
1541static enum ice_status
1542ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1543 struct ice_sched_node *tc_node, u16 *num_nodes)
1544{
1545 struct ice_sched_node *parent = tc_node;
1546 enum ice_status status;
1547 u32 first_node_teid;
1548 u16 num_added = 0;
1549 u8 i, vsil;
1550
1551 if (!pi)
1552 return ICE_ERR_PARAM;
1553
1554 vsil = ice_sched_get_vsi_layer(pi->hw);
1555 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1556 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1557 i, num_nodes[i],
1558 &first_node_teid,
1559 &num_added);
1560 if (status || num_nodes[i] != num_added)
1561 return ICE_ERR_CFG;
1562
1563 /* The newly added node can be a new parent for the next
1564 * layer nodes
1565 */
1566 if (num_added)
1567 parent = ice_sched_find_node_by_teid(tc_node,
1568 first_node_teid);
1569 else
1570 parent = parent->children[0];
1571
1572 if (!parent)
1573 return ICE_ERR_CFG;
1574
1575 if (i == vsil)
1576 parent->vsi_handle = vsi_handle;
1577 }
1578
1579 return 0;
1580}
1581
1582/**
1583 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1584 * @pi: port information structure
1585 * @vsi_handle: software VSI handle
1586 * @tc: TC number
1587 *
1588 * This function adds a new VSI into scheduler tree
1589 */
1590static enum ice_status
1591ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1592{
1593 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1594 struct ice_sched_node *tc_node;
1595 struct ice_hw *hw = pi->hw;
1596
1597 tc_node = ice_sched_get_tc_node(pi, tc);
1598 if (!tc_node)
1599 return ICE_ERR_PARAM;
1600
1601 /* calculate number of supported nodes needed for this VSI */
1602 ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
1603
1604 /* add VSI supported nodes to TC subtree */
1605 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1606 num_nodes);
1607}
1608
1609/**
1610 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1611 * @pi: port information structure
1612 * @vsi_handle: software VSI handle
1613 * @tc: TC number
1614 * @new_numqs: new number of max queues
1615 * @owner: owner of this subtree
1616 *
1617 * This function updates the VSI child nodes based on the number of queues
1618 */
1619static enum ice_status
1620ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1621 u8 tc, u16 new_numqs, u8 owner)
1622{
1623 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1624 struct ice_sched_node *vsi_node;
1625 struct ice_sched_node *tc_node;
1626 struct ice_vsi_ctx *vsi_ctx;
1627 enum ice_status status = 0;
1628 struct ice_hw *hw = pi->hw;
1629 u16 prev_numqs;
1630
1631 tc_node = ice_sched_get_tc_node(pi, tc);
1632 if (!tc_node)
1633 return ICE_ERR_CFG;
1634
1635 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1636 if (!vsi_node)
1637 return ICE_ERR_CFG;
1638
1639 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1640 if (!vsi_ctx)
1641 return ICE_ERR_PARAM;
1642
1643 prev_numqs = vsi_ctx->sched.max_lanq[tc];
	/* the number of queues is unchanged or less than the previous number */
1645 if (new_numqs <= prev_numqs)
1646 return status;
1647 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1648 if (status)
1649 return status;
1650
1651 if (new_numqs)
1652 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1653 /* Keep the max number of queue configuration all the time. Update the
1654 * tree only if number of queues > previous number of queues. This may
1655 * leave some extra nodes in the tree if number of queues < previous
1656 * number but that wouldn't harm anything. Removing those extra nodes
1657 * may complicate the code if those nodes are part of SRL or
1658 * individually rate limited.
1659 */
1660 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1661 new_num_nodes, owner);
1662 if (status)
1663 return status;
1664 vsi_ctx->sched.max_lanq[tc] = new_numqs;
1665
1666 return 0;
1667}
1668
1669/**
1670 * ice_sched_cfg_vsi - configure the new/existing VSI
1671 * @pi: port information structure
1672 * @vsi_handle: software VSI handle
1673 * @tc: TC number
1674 * @maxqs: max number of queues
1675 * @owner: LAN or RDMA
1676 * @enable: TC enabled or disabled
1677 *
 * This function adds/updates VSI nodes based on the number of queues. If the
 * TC is enabled and the VSI is in a suspended state, resume it; if the TC is
 * disabled, suspend the VSI if it is not suspended already.
1681 */
1682enum ice_status
1683ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1684 u8 owner, bool enable)
1685{
1686 struct ice_sched_node *vsi_node, *tc_node;
1687 struct ice_vsi_ctx *vsi_ctx;
1688 enum ice_status status = 0;
1689 struct ice_hw *hw = pi->hw;
1690
1691 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1692 tc_node = ice_sched_get_tc_node(pi, tc);
1693 if (!tc_node)
1694 return ICE_ERR_PARAM;
1695 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1696 if (!vsi_ctx)
1697 return ICE_ERR_PARAM;
1698 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1699
1700 /* suspend the VSI if TC is not enabled */
1701 if (!enable) {
1702 if (vsi_node && vsi_node->in_use) {
1703 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1704
1705 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1706 true);
1707 if (!status)
1708 vsi_node->in_use = false;
1709 }
1710 return status;
1711 }
1712
1713 /* TC is enabled, if it is a new VSI then add it to the tree */
1714 if (!vsi_node) {
1715 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1716 if (status)
1717 return status;
1718
1719 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1720 if (!vsi_node)
1721 return ICE_ERR_CFG;
1722
1723 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1724 vsi_node->in_use = true;
		/* invalidate the max queues whenever the VSI is added for the
		 * first time into the scheduler tree (boot or after reset); in
		 * these cases the child nodes must always be recreated.
1728 */
1729 vsi_ctx->sched.max_lanq[tc] = 0;
1730 }
1731
1732 /* update the VSI child nodes */
1733 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1734 owner);
1735 if (status)
1736 return status;
1737
1738 /* TC is enabled, resume the VSI if it is in the suspend state */
1739 if (!vsi_node->in_use) {
1740 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1741
1742 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1743 if (!status)
1744 vsi_node->in_use = true;
1745 }
1746
1747 return status;
1748}
1749
1750/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes a single aggregator VSI info entry from the
 * aggregator list.
1757 */
1758static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1759{
1760 struct ice_sched_agg_info *agg_info;
1761 struct ice_sched_agg_info *atmp;
1762
1763 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1764 list_entry) {
1765 struct ice_sched_agg_vsi_info *agg_vsi_info;
1766 struct ice_sched_agg_vsi_info *vtmp;
1767
1768 list_for_each_entry_safe(agg_vsi_info, vtmp,
1769 &agg_info->agg_vsi_list, list_entry)
1770 if (agg_vsi_info->vsi_handle == vsi_handle) {
1771 list_del(&agg_vsi_info->list_entry);
1772 devm_kfree(ice_hw_to_dev(pi->hw),
1773 agg_vsi_info);
1774 return;
1775 }
1776 }
1777}
1778
1779/**
1780 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1781 * @node: pointer to the sub-tree node
1782 *
1783 * This function checks for a leaf node presence in a given sub-tree node.
1784 */
1785static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1786{
1787 u8 i;
1788
1789 for (i = 0; i < node->num_children; i++)
1790 if (ice_sched_is_leaf_node_present(node->children[i]))
1791 return true;
1792 /* check for a leaf node */
1793 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1794}
1795
1796/**
1797 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1798 * @pi: port information structure
1799 * @vsi_handle: software VSI handle
1800 * @owner: LAN or RDMA
1801 *
1802 * This function removes the VSI and its LAN or RDMA children nodes from the
1803 * scheduler tree.
1804 */
1805static enum ice_status
1806ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1807{
1808 enum ice_status status = ICE_ERR_PARAM;
1809 struct ice_vsi_ctx *vsi_ctx;
1810 u8 i;
1811
1812 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
1813 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
1814 return status;
1815 mutex_lock(&pi->sched_lock);
1816 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1817 if (!vsi_ctx)
1818 goto exit_sched_rm_vsi_cfg;
1819
1820 ice_for_each_traffic_class(i) {
1821 struct ice_sched_node *vsi_node, *tc_node;
1822 u8 j = 0;
1823
1824 tc_node = ice_sched_get_tc_node(pi, i);
1825 if (!tc_node)
1826 continue;
1827
1828 vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
1829 if (!vsi_node)
1830 continue;
1831
1832 if (ice_sched_is_leaf_node_present(vsi_node)) {
1833 ice_debug(pi->hw, ICE_DBG_SCHED,
1834 "VSI has leaf nodes in TC %d\n", i);
1835 status = ICE_ERR_IN_USE;
1836 goto exit_sched_rm_vsi_cfg;
1837 }
1838 while (j < vsi_node->num_children) {
1839 if (vsi_node->children[j]->owner == owner) {
1840 ice_free_sched_node(pi, vsi_node->children[j]);
1841
1842 /* reset the counter again since the num
1843 * children will be updated after node removal
1844 */
1845 j = 0;
1846 } else {
1847 j++;
1848 }
1849 }
1850 /* remove the VSI if it has no children */
1851 if (!vsi_node->num_children) {
1852 ice_free_sched_node(pi, vsi_node);
1853 vsi_ctx->sched.vsi_node[i] = NULL;
1854
1855 /* clean up aggregator related VSI info if any */
1856 ice_sched_rm_agg_vsi_info(pi, vsi_handle);
1857 }
1858 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1859 vsi_ctx->sched.max_lanq[i] = 0;
1860 }
1861 status = 0;
1862
1863exit_sched_rm_vsi_cfg:
1864 mutex_unlock(&pi->sched_lock);
1865 return status;
1866}
1867
1868/**
1869 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
1870 * @pi: port information structure
1871 * @vsi_handle: software VSI handle
1872 *
1873 * This function clears the VSI and its LAN children nodes from scheduler tree
1874 * for all TCs.
1875 */
1876enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
1877{
1878 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
1879}
1880
1881/**
1882 * ice_sched_rm_unused_rl_prof - remove unused RL profile
1883 * @pi: port information structure
1884 *
1885 * This function removes unused rate limit profiles from the HW and
1886 * SW DB. The caller needs to hold scheduler lock.
1887 */
1888static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
1889{
1890 u16 ln;
1891
1892 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
1893 struct ice_aqc_rl_profile_info *rl_prof_elem;
1894 struct ice_aqc_rl_profile_info *rl_prof_tmp;
1895
1896 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
1897 &pi->rl_prof_list[ln], list_entry) {
1898 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
1899 ice_debug(pi->hw, ICE_DBG_SCHED,
1900 "Removed rl profile\n");
1901 }
1902 }
1903}
1904
1905/**
1906 * ice_sched_update_elem - update element
1907 * @hw: pointer to the HW struct
1908 * @node: pointer to node
1909 * @info: node info to update
1910 *
 * Update the HW DB and the local SW DB for the node. The scheduling
 * parameters of the node are updated from the info argument's data buffer
 * (info->data), and success or an error is returned depending on whether the
 * config sched element command succeeds. The caller needs to hold the
 * scheduler lock.
1915 */
1916static enum ice_status
1917ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
1918 struct ice_aqc_txsched_elem_data *info)
1919{
1920 struct ice_aqc_txsched_elem_data buf;
1921 enum ice_status status;
1922 u16 elem_cfgd = 0;
1923 u16 num_elems = 1;
1924
1925 buf = *info;
	/* Parent TEID is a reserved field in this aq call */
	buf.parent_teid = 0;
	/* Element type is a reserved field in this aq call */
	buf.data.elem_type = 0;
	/* Flags is a reserved field in this aq call */
1931 buf.data.flags = 0;
1932
1933 /* Update HW DB */
1934 /* Configure element node */
1935 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
1936 &elem_cfgd, NULL);
1937 if (status || elem_cfgd != num_elems) {
1938 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
1939 return ICE_ERR_CFG;
1940 }
1941
1942 /* Config success case */
1943 /* Now update local SW DB */
1944 /* Only copy the data portion of info buffer */
1945 node->info.data = info->data;
1946 return status;
1947}
1948
1949/**
1950 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
1951 * @hw: pointer to the HW struct
1952 * @node: sched node to configure
1953 * @rl_type: rate limit type CIR, EIR, or shared
1954 * @bw_alloc: BW weight/allocation
1955 *
1956 * This function configures node element's BW allocation.
1957 */
1958static enum ice_status
1959ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
1960 enum ice_rl_type rl_type, u16 bw_alloc)
1961{
1962 struct ice_aqc_txsched_elem_data buf;
1963 struct ice_aqc_txsched_elem *data;
1964 enum ice_status status;
1965
1966 buf = node->info;
1967 data = &buf.data;
1968 if (rl_type == ICE_MIN_BW) {
1969 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
1970 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1971 } else if (rl_type == ICE_MAX_BW) {
1972 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
1973 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1974 } else {
1975 return ICE_ERR_PARAM;
1976 }
1977
1978 /* Configure element */
1979 status = ice_sched_update_elem(hw, node, &buf);
1980 return status;
1981}
1982
1983/**
1984 * ice_set_clear_cir_bw - set or clear CIR BW
1985 * @bw_t_info: bandwidth type information structure
1986 * @bw: bandwidth in Kbps - Kilo bits per sec
1987 *
1988 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
1989 */
1990static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1991{
1992 if (bw == ICE_SCHED_DFLT_BW) {
1993 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1994 bw_t_info->cir_bw.bw = 0;
1995 } else {
1996 /* Save type of BW information */
1997 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1998 bw_t_info->cir_bw.bw = bw;
1999 }
2000}
2001
2002/**
2003 * ice_set_clear_eir_bw - set or clear EIR BW
2004 * @bw_t_info: bandwidth type information structure
2005 * @bw: bandwidth in Kbps - Kilo bits per sec
2006 *
2007 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
2008 */
2009static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2010{
2011 if (bw == ICE_SCHED_DFLT_BW) {
2012 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2013 bw_t_info->eir_bw.bw = 0;
2014 } else {
2015 /* EIR BW and Shared BW profiles are mutually exclusive and
2016 * hence only one of them may be set for any given element.
2017 * First clear earlier saved shared BW information.
2018 */
2019 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2020 bw_t_info->shared_bw = 0;
2021 /* save EIR BW information */
2022 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2023 bw_t_info->eir_bw.bw = bw;
2024 }
2025}
2026
2027/**
2028 * ice_set_clear_shared_bw - set or clear shared BW
2029 * @bw_t_info: bandwidth type information structure
2030 * @bw: bandwidth in Kbps - Kilo bits per sec
2031 *
2032 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
2033 */
2034static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2035{
2036 if (bw == ICE_SCHED_DFLT_BW) {
2037 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2038 bw_t_info->shared_bw = 0;
2039 } else {
2040 /* EIR BW and Shared BW profiles are mutually exclusive and
2041 * hence only one of them may be set for any given element.
2042 * First clear earlier saved EIR BW information.
2043 */
2044 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2045 bw_t_info->eir_bw.bw = 0;
2046 /* save shared BW information */
2047 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2048 bw_t_info->shared_bw = bw;
2049 }
2050}
2051
2052/**
2053 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
2054 * @bw: bandwidth in Kbps
2055 *
2056 * This function calculates the wakeup parameter of RL profile.
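 *
 * The returned 16-bit value is encoded as follows: when the integer part of
 * the wakeup value is larger than 63, bit 15 is set and the integer value is
 * programmed directly; otherwise bits 14:9 hold the integer part and bits 8:0
 * hold the rounded fractional part.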
2057 */
2058static u16 ice_sched_calc_wakeup(s32 bw)
2059{
2060 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
2061 s32 wakeup_f_int;
2062 u16 wakeup = 0;
2063
2064 /* Get the wakeup integer value */
2065 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2066 wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
2067 if (wakeup_int > 63) {
2068 wakeup = (u16)((1 << 15) | wakeup_int);
2069 } else {
2070 /* Calculate fraction value up to 4 decimals
2071 * Convert Integer value to a constant multiplier
2072 */
2073 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
2074 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
2075 ICE_RL_PROF_FREQUENCY,
2076 bytes_per_sec);
2077
2078 /* Get Fraction value */
2079 wakeup_f = wakeup_a - wakeup_b;
2080
2081 /* Round up the Fractional value via Ceil(Fractional value) */
2082 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
2083 wakeup_f += 1;
2084
2085 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
2086 ICE_RL_PROF_MULTIPLIER);
2087 wakeup |= (u16)(wakeup_int << 9);
2088 wakeup |= (u16)(0x1ff & wakeup_f_int);
2089 }
2090
2091 return wakeup;
2092}
2093
2094/**
2095 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
2096 * @bw: bandwidth in Kbps
2097 * @profile: profile parameters to return
2098 *
2099 * This function converts the BW to profile structure format.
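 *
 * The search below walks power-of-two time-slice divisors (the rl_encode
 * exponent) and picks the first one whose computed multiplier exceeds the
 * accuracy threshold; the resulting multiplier, wakeup and encode values are
 * returned in the profile.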
2100 */
2101static enum ice_status
2102ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
2103{
2104 enum ice_status status = ICE_ERR_PARAM;
2105 s64 bytes_per_sec, ts_rate, mv_tmp;
2106 bool found = false;
2107 s32 encode = 0;
2108 s64 mv = 0;
2109 s32 i;
2110
2111 /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
2112 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
2113 return status;
2114
2115 /* Bytes per second from Kbps */
2116 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2117
	/* encode is 6 bits, but only 5 bits are really useful */
2119 for (i = 0; i < 64; i++) {
2120 u64 pow_result = BIT_ULL(i);
2121
2122 ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
2123 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
2124 if (ts_rate <= 0)
2125 continue;
2126
2127 /* Multiplier value */
2128 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
2129 ts_rate);
2130
2131 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
2132 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
2133
2134 /* First multiplier value greater than the given
2135 * accuracy bytes
2136 */
2137 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
2138 encode = i;
2139 found = true;
2140 break;
2141 }
2142 }
2143 if (found) {
2144 u16 wm;
2145
2146 wm = ice_sched_calc_wakeup(bw);
2147 profile->rl_multiply = cpu_to_le16(mv);
2148 profile->wake_up_calc = cpu_to_le16(wm);
2149 profile->rl_encode = cpu_to_le16(encode);
2150 status = 0;
2151 } else {
2152 status = ICE_ERR_DOES_NOT_EXIST;
2153 }
2154
2155 return status;
2156}
2157
2158/**
2159 * ice_sched_add_rl_profile - add RL profile
2160 * @pi: port information structure
2161 * @rl_type: type of rate limit BW - min, max, or shared
2162 * @bw: bandwidth in Kbps - Kilo bits per sec
2163 * @layer_num: specifies in which layer to create profile
2164 *
 * This function first checks the existing list for a profile matching the
 * requested BW. If one exists, the associated profile is returned; otherwise
 * a new rate limit profile is created for the requested BW, added to the
 * HW DB and the local list, and returned. NULL is returned on error.
2169 * The caller needs to hold the scheduler lock.
2170 */
2171static struct ice_aqc_rl_profile_info *
2172ice_sched_add_rl_profile(struct ice_port_info *pi,
2173 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2174{
2175 struct ice_aqc_rl_profile_info *rl_prof_elem;
2176 u16 profiles_added = 0, num_profiles = 1;
2177 struct ice_aqc_rl_profile_elem *buf;
2178 enum ice_status status;
2179 struct ice_hw *hw;
2180 u8 profile_type;
2181
2182 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2183 return NULL;
2184 switch (rl_type) {
2185 case ICE_MIN_BW:
2186 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2187 break;
2188 case ICE_MAX_BW:
2189 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2190 break;
2191 case ICE_SHARED_BW:
2192 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2193 break;
2194 default:
2195 return NULL;
2196 }
2197
2198 if (!pi)
2199 return NULL;
2200 hw = pi->hw;
2201 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2202 list_entry)
2203 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
2204 profile_type && rl_prof_elem->bw == bw)
2205 /* Return existing profile ID info */
2206 return rl_prof_elem;
2207
2208 /* Create new profile ID */
2209 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
2210 GFP_KERNEL);
2211
2212 if (!rl_prof_elem)
2213 return NULL;
2214
2215 status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
2216 if (status)
2217 goto exit_add_rl_prof;
2218
2219 rl_prof_elem->bw = bw;
	/* layer_num is zero-based, and FW expects levels from 1 to 9 */
2221 rl_prof_elem->profile.level = layer_num + 1;
2222 rl_prof_elem->profile.flags = profile_type;
2223 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
2224
2225 /* Create new entry in HW DB */
2226 buf = &rl_prof_elem->profile;
2227 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
2228 &profiles_added, NULL);
2229 if (status || profiles_added != num_profiles)
2230 goto exit_add_rl_prof;
2231
2232 /* Good entry - add in the list */
2233 rl_prof_elem->prof_id_ref = 0;
2234 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
2235 return rl_prof_elem;
2236
2237exit_add_rl_prof:
2238 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
2239 return NULL;
2240}
2241
2242/**
2243 * ice_sched_cfg_node_bw_lmt - configure node sched params
2244 * @hw: pointer to the HW struct
2245 * @node: sched node to configure
2246 * @rl_type: rate limit type CIR, EIR, or shared
2247 * @rl_prof_id: rate limit profile ID
2248 *
2249 * This function configures node element's BW limit.
2250 */
2251static enum ice_status
2252ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
2253 enum ice_rl_type rl_type, u16 rl_prof_id)
2254{
2255 struct ice_aqc_txsched_elem_data buf;
2256 struct ice_aqc_txsched_elem *data;
2257
2258 buf = node->info;
2259 data = &buf.data;
2260 switch (rl_type) {
2261 case ICE_MIN_BW:
2262 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2263 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2264 break;
2265 case ICE_MAX_BW:
2266 /* EIR BW and Shared BW profiles are mutually exclusive and
2267 * hence only one of them may be set for any given element
2268 */
2269 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2270 return ICE_ERR_CFG;
2271 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2272 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2273 break;
2274 case ICE_SHARED_BW:
2275 /* Check for removing shared BW */
2276 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
2277 /* remove shared profile */
2278 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
2279 data->srl_id = 0; /* clear SRL field */
2280
			/* re-enable EIR and point it at the default profile */
2282 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2283 data->eir_bw.bw_profile_idx =
2284 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
2285 break;
2286 }
2287 /* EIR BW and Shared BW profiles are mutually exclusive and
2288 * hence only one of them may be set for any given element
2289 */
2290 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
2291 (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
2292 ICE_SCHED_DFLT_RL_PROF_ID))
2293 return ICE_ERR_CFG;
2294 /* EIR BW is set to default, disable it */
2295 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
2296 /* Okay to enable shared BW now */
2297 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
2298 data->srl_id = cpu_to_le16(rl_prof_id);
2299 break;
2300 default:
2301 /* Unknown rate limit type */
2302 return ICE_ERR_PARAM;
2303 }
2304
2305 /* Configure element */
2306 return ice_sched_update_elem(hw, node, &buf);
2307}
2308
2309/**
2310 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
2311 * @node: sched node
2312 * @rl_type: rate limit type
2313 *
2314 * If existing profile matches, it returns the corresponding rate
2315 * limit profile ID, otherwise it returns an invalid ID as error.
2316 */
2317static u16
2318ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
2319 enum ice_rl_type rl_type)
2320{
2321 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
2322 struct ice_aqc_txsched_elem *data;
2323
2324 data = &node->info.data;
2325 switch (rl_type) {
2326 case ICE_MIN_BW:
2327 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
2328 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
2329 break;
2330 case ICE_MAX_BW:
2331 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
2332 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
2333 break;
2334 case ICE_SHARED_BW:
2335 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2336 rl_prof_id = le16_to_cpu(data->srl_id);
2337 break;
2338 default:
2339 break;
2340 }
2341
2342 return rl_prof_id;
2343}
2344
2345/**
2346 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
2347 * @pi: port information structure
2348 * @rl_type: type of rate limit BW - min, max, or shared
2349 * @layer_index: layer index
2350 *
2351 * This function returns requested profile creation layer.
2352 */
2353static u8
2354ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
2355 u8 layer_index)
2356{
2357 struct ice_hw *hw = pi->hw;
2358
2359 if (layer_index >= hw->num_tx_sched_layers)
2360 return ICE_SCHED_INVAL_LAYER_NUM;
2361 switch (rl_type) {
2362 case ICE_MIN_BW:
2363 if (hw->layer_info[layer_index].max_cir_rl_profiles)
2364 return layer_index;
2365 break;
2366 case ICE_MAX_BW:
2367 if (hw->layer_info[layer_index].max_eir_rl_profiles)
2368 return layer_index;
2369 break;
2370 case ICE_SHARED_BW:
2371 /* if current layer doesn't support SRL profile creation
2372 * then try a layer up or down.
2373 */
2374 if (hw->layer_info[layer_index].max_srl_profiles)
2375 return layer_index;
2376 else if (layer_index < hw->num_tx_sched_layers - 1 &&
2377 hw->layer_info[layer_index + 1].max_srl_profiles)
2378 return layer_index + 1;
2379 else if (layer_index > 0 &&
2380 hw->layer_info[layer_index - 1].max_srl_profiles)
2381 return layer_index - 1;
2382 break;
2383 default:
2384 break;
2385 }
2386 return ICE_SCHED_INVAL_LAYER_NUM;
2387}
2388
2389/**
2390 * ice_sched_get_srl_node - get shared rate limit node
2391 * @node: tree node
2392 * @srl_layer: shared rate limit layer
2393 *
2394 * This function returns SRL node to be used for shared rate limit purpose.
2395 * The caller needs to hold scheduler lock.
2396 */
2397static struct ice_sched_node *
2398ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
2399{
2400 if (srl_layer > node->tx_sched_layer)
2401 return node->children[0];
2402 else if (srl_layer < node->tx_sched_layer)
		/* A node can't be created without a parent; every node except
		 * the root has a valid parent.
2405 */
2406 return node->parent;
2407 else
2408 return node;
2409}
2410
2411/**
2412 * ice_sched_rm_rl_profile - remove RL profile ID
2413 * @pi: port information structure
2414 * @layer_num: layer number where profiles are saved
2415 * @profile_type: profile type like EIR, CIR, or SRL
2416 * @profile_id: profile ID to remove
2417 *
 * This function removes the rate limit profile with ID 'profile_id' and type
 * 'profile_type' from layer 'layer_num'. The caller needs to hold the
2420 * scheduler lock.
2421 */
2422static enum ice_status
2423ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
2424 u16 profile_id)
2425{
2426 struct ice_aqc_rl_profile_info *rl_prof_elem;
2427 enum ice_status status = 0;
2428
2429 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2430 return ICE_ERR_PARAM;
2431 /* Check the existing list for RL profile */
2432 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2433 list_entry)
2434 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
2435 profile_type &&
2436 le16_to_cpu(rl_prof_elem->profile.profile_id) ==
2437 profile_id) {
2438 if (rl_prof_elem->prof_id_ref)
2439 rl_prof_elem->prof_id_ref--;
2440
2441 /* Remove old profile ID from database */
2442 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
2443 if (status && status != ICE_ERR_IN_USE)
2444 ice_debug(pi->hw, ICE_DBG_SCHED,
2445 "Remove rl profile failed\n");
2446 break;
2447 }
2448 if (status == ICE_ERR_IN_USE)
2449 status = 0;
2450 return status;
2451}
2452
2453/**
2454 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
2455 * @pi: port information structure
2456 * @node: pointer to node structure
2457 * @rl_type: rate limit type min, max, or shared
2458 * @layer_num: layer number where RL profiles are saved
2459 *
2460 * This function configures node element's BW rate limit profile ID of
2461 * type CIR, EIR, or SRL to default. This function needs to be called
2462 * with the scheduler lock held.
2463 */
2464static enum ice_status
2465ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
2466 struct ice_sched_node *node,
2467 enum ice_rl_type rl_type, u8 layer_num)
2468{
2469 enum ice_status status;
2470 struct ice_hw *hw;
2471 u8 profile_type;
2472 u16 rl_prof_id;
2473 u16 old_id;
2474
2475 hw = pi->hw;
2476 switch (rl_type) {
2477 case ICE_MIN_BW:
2478 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2479 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2480 break;
2481 case ICE_MAX_BW:
2482 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2483 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2484 break;
2485 case ICE_SHARED_BW:
2486 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2487 /* No SRL is configured for default case */
2488 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
2489 break;
2490 default:
2491 return ICE_ERR_PARAM;
2492 }
2493 /* Save existing RL prof ID for later clean up */
2494 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2495 /* Configure BW scheduling parameters */
2496 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2497 if (status)
2498 return status;
2499
2500 /* Remove stale RL profile ID */
2501 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
2502 old_id == ICE_SCHED_INVAL_PROF_ID)
2503 return 0;
2504
2505 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
2506}
2507
2508/**
2509 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
2510 * @pi: port information structure
2511 * @node: pointer to node structure
2512 * @layer_num: layer number where rate limit profiles are saved
2513 * @rl_type: rate limit type min, max, or shared
2514 * @bw: bandwidth value
2515 *
 * This function prepares the node element's bandwidth for either SRL or EIR
 * exclusively.
2517 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
2518 * them may be set for any given element. This function needs to be called
2519 * with the scheduler lock held.
2520 */
2521static enum ice_status
2522ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
2523 struct ice_sched_node *node,
2524 u8 layer_num, enum ice_rl_type rl_type, u32 bw)
2525{
2526 if (rl_type == ICE_SHARED_BW) {
		/* the SRL node was passed in this case; it may be a different node */
2528 if (bw == ICE_SCHED_DFLT_BW)
2529 /* SRL being removed, ice_sched_cfg_node_bw_lmt()
2530 * enables EIR to default. EIR is not set in this
2531 * case, so no additional action is required.
2532 */
2533 return 0;
2534
2535 /* SRL being configured, set EIR to default here.
2536 * ice_sched_cfg_node_bw_lmt() disables EIR when it
2537 * configures SRL
2538 */
2539 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
2540 layer_num);
2541 } else if (rl_type == ICE_MAX_BW &&
2542 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
2543 /* Remove Shared profile. Set default shared BW call
2544 * removes shared profile for a node.
2545 */
2546 return ice_sched_set_node_bw_dflt(pi, node,
2547 ICE_SHARED_BW,
2548 layer_num);
2549 }
2550 return 0;
2551}
2552
2553/**
2554 * ice_sched_set_node_bw - set node's bandwidth
2555 * @pi: port information structure
2556 * @node: tree node
2557 * @rl_type: rate limit type min, max, or shared
2558 * @bw: bandwidth in Kbps - Kilo bits per sec
2559 * @layer_num: layer number
2560 *
2561 * This function adds new profile corresponding to requested BW, configures
2562 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
2563 * ID from local database. The caller needs to hold scheduler lock.
2564 */
2565static enum ice_status
2566ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
2567 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2568{
2569 struct ice_aqc_rl_profile_info *rl_prof_info;
2570 enum ice_status status = ICE_ERR_PARAM;
2571 struct ice_hw *hw = pi->hw;
2572 u16 old_id, rl_prof_id;
2573
2574 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
2575 if (!rl_prof_info)
2576 return status;
2577
2578 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
2579
2580 /* Save existing RL prof ID for later clean up */
2581 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2582 /* Configure BW scheduling parameters */
2583 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2584 if (status)
2585 return status;
2586
	/* New changes have been applied */
2588 /* Increment the profile ID reference count */
2589 rl_prof_info->prof_id_ref++;
2590
2591 /* Check for old ID removal */
2592 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
2593 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
2594 return 0;
2595
2596 return ice_sched_rm_rl_profile(pi, layer_num,
2597 rl_prof_info->profile.flags &
2598 ICE_AQC_RL_PROFILE_TYPE_M, old_id);
2599}
2600
2601/**
2602 * ice_sched_set_node_bw_lmt - set node's BW limit
2603 * @pi: port information structure
2604 * @node: tree node
2605 * @rl_type: rate limit type min, max, or shared
2606 * @bw: bandwidth in Kbps - Kilo bits per sec
2607 *
2608 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
2609 * EIR, or SRL. The caller needs to hold scheduler lock.
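 *
 * The flow is: prune unused RL profiles, select the layer that can hold the
 * profile for this rl_type, resolve the SRL node if needed, enforce EIR/SRL
 * exclusivity, and then either restore the default profile or install a new
 * RL profile for the requested BW.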
2610 */
2611static enum ice_status
2612ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
2613 enum ice_rl_type rl_type, u32 bw)
2614{
2615 struct ice_sched_node *cfg_node = node;
	enum ice_status status;
	struct ice_hw *hw;
2619 u8 layer_num;
2620
2621 if (!pi)
2622 return ICE_ERR_PARAM;
2623 hw = pi->hw;
2624 /* Remove unused RL profile IDs from HW and SW DB */
2625 ice_sched_rm_unused_rl_prof(pi);
2626 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
2627 node->tx_sched_layer);
2628 if (layer_num >= hw->num_tx_sched_layers)
2629 return ICE_ERR_PARAM;
2630
2631 if (rl_type == ICE_SHARED_BW) {
2632 /* SRL node may be different */
2633 cfg_node = ice_sched_get_srl_node(node, layer_num);
2634 if (!cfg_node)
2635 return ICE_ERR_CFG;
2636 }
2637 /* EIR BW and Shared BW profiles are mutually exclusive and
2638 * hence only one of them may be set for any given element
2639 */
2640 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
2641 bw);
2642 if (status)
2643 return status;
2644 if (bw == ICE_SCHED_DFLT_BW)
2645 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
2646 layer_num);
2647 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
2648}
2649
2650/**
2651 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
2652 * @pi: port information structure
2653 * @node: pointer to node structure
2654 * @rl_type: rate limit type min, max, or shared
2655 *
2656 * This function configures node element's BW rate limit profile ID of
2657 * type CIR, EIR, or SRL to default. This function needs to be called
2658 * with the scheduler lock held.
2659 */
2660static enum ice_status
2661ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
2662 struct ice_sched_node *node,
2663 enum ice_rl_type rl_type)
2664{
2665 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
2666 ICE_SCHED_DFLT_BW);
2667}
2668
2669/**
2670 * ice_sched_validate_srl_node - Check node for SRL applicability
2671 * @node: sched node to configure
2672 * @sel_layer: selected SRL layer
2673 *
2674 * This function checks if the SRL can be applied to a selected layer node on
2675 * behalf of the requested node (first argument). This function needs to be
2676 * called with scheduler lock held.
2677 */
2678static enum ice_status
2679ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
2680{
2681 /* SRL profiles are not available on all layers. Check if the
2682 * SRL profile can be applied to a node above or below the
2683 * requested node. SRL configuration is possible only if the
	 * selected layer's node has a single child.
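	 * A selected layer one below the requested node maps onto the node's
	 * only child; a selected layer one above maps onto the node's parent,
	 * which must then have the requested node as its only child.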
2685 */
2686 if (sel_layer == node->tx_sched_layer ||
2687 ((sel_layer == node->tx_sched_layer + 1) &&
2688 node->num_children == 1) ||
2689 ((sel_layer == node->tx_sched_layer - 1) &&
2690 (node->parent && node->parent->num_children == 1)))
2691 return 0;
2692
2693 return ICE_ERR_CFG;
2694}
2695
2696/**
2697 * ice_sched_save_q_bw - save queue node's BW information
2698 * @q_ctx: queue context structure
2699 * @rl_type: rate limit type min, max, or shared
2700 * @bw: bandwidth in Kbps - Kilo bits per sec
2701 *
2702 * Save BW information of queue type node for post replay use.
2703 */
2704static enum ice_status
2705ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
2706{
2707 switch (rl_type) {
2708 case ICE_MIN_BW:
2709 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
2710 break;
2711 case ICE_MAX_BW:
2712 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
2713 break;
2714 case ICE_SHARED_BW:
2715 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
2716 break;
2717 default:
2718 return ICE_ERR_PARAM;
2719 }
2720 return 0;
2721}
2722
2723/**
2724 * ice_sched_set_q_bw_lmt - sets queue BW limit
2725 * @pi: port information structure
2726 * @vsi_handle: sw VSI handle
2727 * @tc: traffic class
2728 * @q_handle: software queue handle
2729 * @rl_type: min, max, or shared
2730 * @bw: bandwidth in Kbps
2731 *
2732 * This function sets BW limit of queue scheduling node.
2733 */
2734static enum ice_status
2735ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2736 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2737{
2738 enum ice_status status = ICE_ERR_PARAM;
2739 struct ice_sched_node *node;
2740 struct ice_q_ctx *q_ctx;
2741
2742 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2743 return ICE_ERR_PARAM;
2744 mutex_lock(&pi->sched_lock);
2745 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
2746 if (!q_ctx)
2747 goto exit_q_bw_lmt;
2748 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2749 if (!node) {
2750 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
2751 goto exit_q_bw_lmt;
2752 }
2753
2754 /* Return error if it is not a leaf node */
2755 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
2756 goto exit_q_bw_lmt;
2757
2758 /* SRL bandwidth layer selection */
2759 if (rl_type == ICE_SHARED_BW) {
2760 u8 sel_layer; /* selected layer */
2761
2762 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
2763 node->tx_sched_layer);
2764 if (sel_layer >= pi->hw->num_tx_sched_layers) {
2765 status = ICE_ERR_PARAM;
2766 goto exit_q_bw_lmt;
2767 }
2768 status = ice_sched_validate_srl_node(node, sel_layer);
2769 if (status)
2770 goto exit_q_bw_lmt;
2771 }
2772
2773 if (bw == ICE_SCHED_DFLT_BW)
2774 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
2775 else
2776 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
2777
2778 if (!status)
2779 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
2780
2781exit_q_bw_lmt:
2782 mutex_unlock(&pi->sched_lock);
2783 return status;
2784}
2785
2786/**
2787 * ice_cfg_q_bw_lmt - configure queue BW limit
2788 * @pi: port information structure
2789 * @vsi_handle: sw VSI handle
2790 * @tc: traffic class
2791 * @q_handle: software queue handle
2792 * @rl_type: min, max, or shared
2793 * @bw: bandwidth in Kbps
2794 *
2795 * This function configures BW limit of queue scheduling node.
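 *
 * For example (handles are illustrative only), a call like
 * ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle, ICE_MAX_BW, 100000) would cap
 * the queue at roughly 100 Mbps, since the bw argument is in Kbps.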
2796 */
2797enum ice_status
2798ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2799 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2800{
2801 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2802 bw);
2803}
2804
2805/**
2806 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
2807 * @pi: port information structure
2808 * @vsi_handle: sw VSI handle
2809 * @tc: traffic class
2810 * @q_handle: software queue handle
2811 * @rl_type: min, max, or shared
2812 *
 * This function configures the default BW limit of a queue scheduling node.
2814 */
2815enum ice_status
2816ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2817 u16 q_handle, enum ice_rl_type rl_type)
2818{
2819 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2820 ICE_SCHED_DFLT_BW);
2821}
2822
2823/**
2824 * ice_cfg_rl_burst_size - Set burst size value
2825 * @hw: pointer to the HW struct
2826 * @bytes: burst size in bytes
2827 *
 * This function configures/sets the burst size to the requested new value.
 * The new burst size value is used for future rate limit calls. It doesn't
 * change the existing or previously created RL profiles.
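 *
 * For example, a request of 4096 bytes would typically fall into the 64 byte
 * granularity range and be programmed as 4096 / 64 = 64 chunks with the MSB
 * granularity bit cleared; the exact cut-over point depends on the
 * ICE_MAX_BURST_SIZE_* definitions.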
2831 */
2832enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
2833{
2834 u16 burst_size_to_prog;
2835
2836 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
2837 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
2838 return ICE_ERR_PARAM;
2839 if (ice_round_to_num(bytes, 64) <=
2840 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
2841 /* 64 byte granularity case */
2842 /* Disable MSB granularity bit */
2843 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
2844 /* round number to nearest 64 byte granularity */
2845 bytes = ice_round_to_num(bytes, 64);
2846 /* The value is in 64 byte chunks */
2847 burst_size_to_prog |= (u16)(bytes / 64);
2848 } else {
2849 /* k bytes granularity case */
2850 /* Enable MSB granularity bit */
2851 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
2852 /* round number to nearest 1024 granularity */
2853 bytes = ice_round_to_num(bytes, 1024);
2854 /* check rounding doesn't go beyond allowed */
2855 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
2856 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
2857 /* The value is in k bytes */
2858 burst_size_to_prog |= (u16)(bytes / 1024);
2859 }
2860 hw->max_burst_size = burst_size_to_prog;
2861 return 0;
2862}
2863
2864/**
2865 * ice_sched_replay_node_prio - re-configure node priority
2866 * @hw: pointer to the HW struct
2867 * @node: sched node to configure
2868 * @priority: priority value
2869 *
2870 * This function configures node element's priority value. It
2871 * needs to be called with scheduler lock held.
2872 */
2873static enum ice_status
2874ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
2875 u8 priority)
2876{
2877 struct ice_aqc_txsched_elem_data buf;
2878 struct ice_aqc_txsched_elem *data;
2879 enum ice_status status;
2880
2881 buf = node->info;
2882 data = &buf.data;
2883 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
2884 data->generic = priority;
2885
2886 /* Configure element */
2887 status = ice_sched_update_elem(hw, node, &buf);
2888 return status;
2889}
2890
2891/**
2892 * ice_sched_replay_node_bw - replay node(s) BW
2893 * @hw: pointer to the HW struct
2894 * @node: sched node to configure
2895 * @bw_t_info: BW type information
2896 *
2897 * This function restores node's BW from bw_t_info. The caller needs
2898 * to hold the scheduler lock.
2899 */
2900static enum ice_status
2901ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
2902 struct ice_bw_type_info *bw_t_info)
2903{
2904 struct ice_port_info *pi = hw->port_info;
2905 enum ice_status status = ICE_ERR_PARAM;
2906 u16 bw_alloc;
2907
2908 if (!node)
2909 return status;
2910 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
2911 return 0;
2912 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
2913 status = ice_sched_replay_node_prio(hw, node,
2914 bw_t_info->generic);
2915 if (status)
2916 return status;
2917 }
2918 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
2919 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
2920 bw_t_info->cir_bw.bw);
2921 if (status)
2922 return status;
2923 }
2924 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
2925 bw_alloc = bw_t_info->cir_bw.bw_alloc;
2926 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
2927 bw_alloc);
2928 if (status)
2929 return status;
2930 }
2931 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
2932 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
2933 bw_t_info->eir_bw.bw);
2934 if (status)
2935 return status;
2936 }
2937 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
2938 bw_alloc = bw_t_info->eir_bw.bw_alloc;
2939 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
2940 bw_alloc);
2941 if (status)
2942 return status;
2943 }
2944 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
2945 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
2946 bw_t_info->shared_bw);
2947 return status;
2948}
2949
2950/**
2951 * ice_sched_replay_q_bw - replay queue type node BW
2952 * @pi: port information structure
2953 * @q_ctx: queue context structure
2954 *
2955 * This function replays queue type node bandwidth. This function needs to be
2956 * called with scheduler lock held.
2957 */
2958enum ice_status
2959ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
2960{
2961 struct ice_sched_node *q_node;
2962
2963 /* Following also checks the presence of node in tree */
2964 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2965 if (!q_node)
2966 return ICE_ERR_PARAM;
2967 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
2968}