// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include <net/devlink.h>
#include "ice_sched.h"

/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */
static int
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return -EINVAL;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return -ENOMEM;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is the same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}

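/*
 * Example (illustrative sketch, not part of the driver): a hypothetical
 * caller resolving a TEID to a node. Per the comment above, the lookup
 * assumes port_info->sched_lock is held and that @pi has a valid root:
 *
 *	struct ice_sched_node *node;
 *
 *	mutex_lock(&pi->sched_lock);
 *	node = ice_sched_find_node_by_teid(pi->root, teid);
 *	mutex_unlock(&pi->sched_lock);
 *	if (!node)
 *		return -ENOENT;
 */
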
/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */
static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = le16_to_cpu(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

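/*
 * Example (hypothetical sketch): querying a single element with the wrapper
 * above. The driver's ice_sched_query_elem() helper follows this pattern;
 * assuming @hw and a valid @node_teid:
 *
 *	struct ice_aqc_txsched_elem_data elem = {};
 *	u16 num_elem_ret = 0;
 *	int status;
 *
 *	elem.node_teid = cpu_to_le32(node_teid);
 *	status = ice_aq_query_sched_elems(hw, 1, &elem, sizeof(elem),
 *					  &num_elem_ret, NULL);
 *	if (!status && num_elem_ret != 1)
 *		status = -EIO;
 */
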
/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 * @prealloc_node: preallocated ice_sched_node struct for SW DB
 *
 * This function inserts a scheduler node to the SW DB.
 */
int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info,
		   struct ice_sched_node *prealloc_node)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	struct ice_hw *hw;
	int status;

	if (!pi)
		return -EINVAL;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return -EINVAL;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	if (prealloc_node)
		node = prealloc_node;
	else
		node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return -ENOMEM;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return 0;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @node_teid: node TEID to be deleted
 *
 * This function removes nodes from HW
 */
static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u32 node_teid)
{
	DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
	u16 buf_size = __struct_size(buf);
	u16 num_groups_removed = 0;
	int status;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(1);
	buf->teid[0] = cpu_to_le32(node_teid);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

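/*
 * Example (illustrative): walking every TC branch under the root with the
 * helper above. Assumes a valid @pi and that the caller holds
 * pi->sched_lock; the loop body is a placeholder:
 *
 *	u8 tc;
 *
 *	for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
 *		struct ice_sched_node *tc_node;
 *
 *		tc_node = ice_sched_get_tc_node(pi, tc);
 *		if (!tc_node)
 *			continue;
 *		... use tc_node ...
 *	}
 */
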
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	devm_kfree(ice_hw_to_dev(hw), node->children);
	kfree(node->name);
	xa_erase(&pi->sched_node_ids, node->id);
	devm_kfree(ice_hw_to_dev(hw), node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling elements
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler element (just 1 group)
 * @hw: pointer to the HW struct
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 *
 * Move scheduling elements (0x0408)
 */
int
ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
			u16 buf_size, u16 *grps_movd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   1, buf, buf_size, grps_movd, NULL);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * Resume scheduling elements (0x040A)
 */
static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node TEIDs to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	__le32 *buf;
	int status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < num_nodes; i++)
		buf[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

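/*
 * Example (hypothetical): suspending a single node by TEID, mirroring how
 * ice_sched_cfg_vsi() below suspends a VSI node:
 *
 *	u32 teid = le32_to_cpu(node->info.node_teid);
 *	int status;
 *
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
 *	if (!status)
 *		node->in_use = false;
 */
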
/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;
	u16 idx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return -ENOMEM;

		for (idx = 0; idx < new_numqs; idx++) {
			q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
			q_ctx[idx].q_teid = ICE_INVAL_TEID;
		}

		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return 0;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return -ENOMEM;

		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);

		for (idx = prev_num; idx < new_numqs; idx++) {
			q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
			q_ctx[idx].q_teid = ICE_INVAL_TEID;
		}

		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return 0;
}

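/*
 * Worked example (illustrative): a first call with new_numqs = 8 allocates
 * eight contexts, each marked ICE_INVAL_Q_HANDLE/ICE_INVAL_TEID. A later
 * call with new_numqs = 16 allocates a sixteen-entry array, copies the
 * first eight contexts across, and initializes entries 8..15 the same way.
 * A call with new_numqs at or below the current count leaves the array
 * untouched.
 */
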
/**
 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	/* allocate RDMA queue contexts */
	if (!vsi_ctx->rdma_q_ctx[tc]) {
		vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						       new_numqs,
						       sizeof(*q_ctx),
						       GFP_KERNEL);
		if (!vsi_ctx->rdma_q_ctx[tc])
			return -ENOMEM;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
		return 0;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return -ENOMEM;
		memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
		vsi_ctx->rdma_q_ctx[tc] = q_ctx;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
	}
	return 0;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = cpu_to_le16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = le16_to_cpu(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes profile ID with
 * its associated parameters from HW DB, and locally. The caller needs to
 * hold scheduler lock.
 */
static int
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	u16 num_profiles = 1;
	int status;

	if (rl_info->prof_id_ref != 0)
		return -EBUSY;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return -EIO;

	/* Delete stale entry now */
	list_del(&rl_info->list_entry);
	devm_kfree(ice_hw_to_dev(hw), rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			struct ice_hw *hw = pi->hw;
			int status;

			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* On error, free mem required */
				list_del(&rl_prof_elem->list_entry);
				devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
			}
		}
	}
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up aggregator-related
 * memory previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
		list_del(&agg_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
	hw->layer_info = NULL;

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 * @prealloc_nodes: preallocated nodes struct for software DB
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid,
		    struct ice_sched_node **prealloc_nodes)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	struct ice_hw *hw = pi->hw;
	size_t buf_size;
	int status = 0;
	u32 teid;

	buf_size = struct_size(buf, generic, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		devm_kfree(ice_hw_to_dev(hw), buf);
		return -EIO;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		if (prealloc_nodes)
			status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
		else
			status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;
		new_node->tx_weight = ICE_SCHED_DFLT_BW_WT;
		new_node->tx_share = ICE_SCHED_DFLT_BW;
		new_node->tx_max = ICE_SCHED_DFLT_BW;
		new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL);
		if (!new_node->name) {
			/* break instead of returning so buf is not leaked */
			status = -ENOMEM;
			break;
		}

		status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX),
				  GFP_KERNEL);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n",
				  status);
			break;
		}

		snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id);

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * Add nodes into specific HW layer.
 */
static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
				struct ice_sched_node *tc_node,
				struct ice_sched_node *parent, u8 layer,
				u16 num_nodes, u32 *first_node_teid,
				u16 *num_nodes_added)
{
	u16 max_child_nodes;

	*num_nodes_added = 0;

	if (!num_nodes)
		return 0;

	if (!parent || layer < pi->hw->sw_entry_point_layer)
		return -EINVAL;

	/* max children per node per layer */
	max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return -EIO;
		return -ENOSPC;
	}

	return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				   num_nodes_added, first_node_teid, NULL);
}

/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes = num_nodes;
	int status = 0;

	*num_nodes_added = 0;
	while (*num_nodes_added < num_nodes) {
		u16 max_child_nodes, num_added = 0;
		u32 temp;

		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
							 layer, new_num_nodes,
							 first_teid_ptr,
							 &num_added);
		if (!status)
			*num_nodes_added += num_added;
		/* added more nodes than requested ? */
		if (*num_nodes_added > num_nodes) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
				  *num_nodes_added);
			status = -EIO;
			break;
		}
		/* break if all the nodes are added successfully */
		if (!status && (*num_nodes_added == num_nodes))
			break;
		/* break if the error is not max limit */
		if (status && status != -ENOSPC)
			break;
		/* Exceeded the max children */
		max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
		} else {
			/* This parent is full, try the next sibling */
			parent = parent->sibling;
			/* Don't modify the first node TEID memory if the
			 * first node was added already in the above call.
			 * Instead send some temp memory for all other
			 * recursive calls.
			 */
			if (num_added)
				first_teid_ptr = &temp;

			new_num_nodes = num_nodes - *num_nodes_added;
		}
	}
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9                6
	 *     7                4
	 *     5 or less        sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}

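/*
 * Worked example: with the common 9-layer tree and a VSI layer offset of 3,
 * the VSI layer is 9 - 3 = 6; a 7-layer tree yields 4. Trees of 5 or fewer
 * layers fall back to sw_entry_point_layer, matching the table above.
 */
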
/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */
u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
	/* Num Layers       aggregator layer
	 *     9                4
	 *     7 or less        sw_entry_point_layer
	 */
	/* calculate the aggregator layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}

/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		int status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */
int ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	int status;
	u8 i, j;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = -EINVAL;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = -EINVAL;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&pi->rl_prof_list[i]);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	kfree(buf);
	return status;
}

/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
int ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	__le16 max_sibl;
	int status = 0;
	u16 i;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = le16_to_cpu(max_sibl);
	}

	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = -ENOMEM;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 * @hw: pointer to the HW struct
 *
 * Determine the PSM clock frequency and store in HW struct
 */
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val);

#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3

	switch (clk_src) {
	case PSM_CLK_SRC_367_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_416_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_446_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_390_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;
	default:
		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
			  clk_src);
		/* fall back to a safe default */
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
	}
}

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: software VSI handle
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until we find a node that has fewer than the
	 * minimum number of children. This way all queue group nodes get an
	 * equal share and stay active, and the bandwidth is distributed
	 * equally across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}

/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* fail if the VSI ID is invalid (no VSI node on this TC) */
	if (!vsi_node)
		return NULL;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}

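/*
 * Example (hypothetical): finding the parent queue group before adding a
 * LAN queue, roughly as ice_ena_vsi_txq() in ice_common.c does. Assumes the
 * caller holds pi->sched_lock:
 *
 *	struct ice_sched_node *parent;
 *
 *	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
 *					    ICE_SCHED_NODE_OWNER_LAN);
 *	if (!parent)
 *		return -EINVAL;
 */
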
/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @agg_id: aggregator ID
 *
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
 */
struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u32 agg_id)
{
	struct ice_sched_node *node;
	struct ice_hw *hw = pi->hw;
	u8 agg_layer;

	if (!hw)
		return NULL;
	agg_layer = ice_sched_get_agg_layer(hw);
	node = ice_sched_get_first_node(pi, tc_node, agg_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->agg_id == agg_id)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}

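/*
 * Worked example (hypothetical fan-outs): with 300 queues, a queue group
 * layer fan-out of 8 and one intermediate layer of fan-out 4 between the
 * queue group and VSI layers, the loop computes DIV_ROUND_UP(300, 8) = 38
 * queue group nodes, then DIV_ROUND_UP(38, 4) = 10 intermediate nodes.
 * Each layer is given at least one node.
 */
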
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately.
 */
static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		int status;

		if (!parent)
			return -EIO;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If the intermediate nodes have reached max
			 * children, then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children < pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return -EINVAL;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		int status;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return -EIO;

		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return 0;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EINVAL;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;
	int status = 0;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return -EIO;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi_ctx->sched.max_lanq[tc];
	else
		prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	if (owner == ICE_SCHED_NODE_OWNER_LAN) {
		status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	} else {
		status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	}

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the tree sized for the maximum number of queues seen so far.
	 * Update the tree only if the new number of queues is greater than
	 * the previous number. This may leave some extra nodes in the tree
	 * if the number of queues shrinks, but that wouldn't harm anything.
	 * Removing those extra nodes may complicate the code if those nodes
	 * are part of SRL or individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		vsi_ctx->sched.max_lanq[tc] = new_numqs;
	else
		vsi_ctx->sched.max_rdmaq[tc] = new_numqs;

	return 0;
}

1886/**
1887 * ice_sched_cfg_vsi - configure the new/existing VSI
1888 * @pi: port information structure
1889 * @vsi_handle: software VSI handle
1890 * @tc: TC number
1891 * @maxqs: max number of queues
1892 * @owner: LAN or RDMA
1893 * @enable: TC enabled or disabled
1894 *
1895 * This function adds/updates VSI nodes based on the number of queues. If TC is
1896 * enabled and VSI is in suspended state then resume the VSI back. If TC is
1897 * disabled then suspend the VSI if it is not already.
1898 */
1899int
1900ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1901 u8 owner, bool enable)
1902{
1903 struct ice_sched_node *vsi_node, *tc_node;
1904 struct ice_vsi_ctx *vsi_ctx;
1905 struct ice_hw *hw = pi->hw;
1906 int status = 0;
1907
1908 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1909 tc_node = ice_sched_get_tc_node(pi, tc);
1910 if (!tc_node)
1911 return -EINVAL;
1912 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1913 if (!vsi_ctx)
1914 return -EINVAL;
1915 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1916
1917 /* suspend the VSI if TC is not enabled */
1918 if (!enable) {
1919 if (vsi_node && vsi_node->in_use) {
1920 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1921
1922 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1923 true);
1924 if (!status)
1925 vsi_node->in_use = false;
1926 }
1927 return status;
1928 }
1929
1930 /* TC is enabled, if it is a new VSI then add it to the tree */
1931 if (!vsi_node) {
1932 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1933 if (status)
1934 return status;
1935
1936 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1937 if (!vsi_node)
1938 return -EIO;
1939
1940 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1941 vsi_node->in_use = true;
1942 /* invalidate the max queues whenever VSI gets added first time
1943 * into the scheduler tree (boot or after reset). We need to
1944 * recreate the child nodes all the time in these cases.
1945 */
1946 vsi_ctx->sched.max_lanq[tc] = 0;
1947 vsi_ctx->sched.max_rdmaq[tc] = 0;
1948 }
1949
1950 /* update the VSI child nodes */
1951 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1952 owner);
1953 if (status)
1954 return status;
1955
1956 /* TC is enabled, resume the VSI if it is in the suspend state */
1957 if (!vsi_node->in_use) {
1958 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1959
1960 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1961 if (!status)
1962 vsi_node->in_use = true;
1963 }
1964
1965 return status;
1966}
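
/* Illustrative usage sketch (not part of the driver): adding or updating a
 * VSI's LAN scheduling nodes on TC 0 via ice_sched_cfg_vsi(). The handle and
 * queue count below are made up; in-tree callers invoke this from the Tx
 * queue setup path with pi->sched_lock held, as done here.
 */
static int __maybe_unused ice_sched_cfg_vsi_example(struct ice_port_info *pi)
{
	u16 vsi_handle = 0;	/* hypothetical software VSI handle */
	int status;

	mutex_lock(&pi->sched_lock);
	/* enable TC 0 with up to 16 LAN queues for this VSI */
	status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 16,
				   ICE_SCHED_NODE_OWNER_LAN, true);
	mutex_unlock(&pi->sched_lock);
	return status;
}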

/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				list_del(&agg_vsi_info->list_entry);
				devm_kfree(ice_hw_to_dev(pi->hw),
					   agg_vsi_info);
				return;
			}
	}
}

/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree node.
 */
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}

/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree.
 */
static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	struct ice_vsi_ctx *vsi_ctx;
	int status = -EINVAL;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	mutex_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = -EBUSY;
			goto exit_sched_rm_vsi_cfg;
		}
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
		else
			vsi_ctx->sched.max_rdmaq[i] = 0;
	}
	status = 0;

exit_sched_rm_vsi_cfg:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs.
 */
int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its RDMA children nodes from scheduler tree
 * for all TCs.
 */
int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
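
/* Illustrative usage sketch (not part of the driver): tearing down a VSI's
 * scheduling nodes for both owners. The handle is hypothetical; both helpers
 * take pi->sched_lock internally, so no lock is held here.
 */
static void __maybe_unused ice_rm_vsi_cfg_example(struct ice_port_info *pi)
{
	u16 vsi_handle = 0;	/* hypothetical software VSI handle */

	if (ice_rm_vsi_rdma_cfg(pi, vsi_handle))
		ice_debug(pi->hw, ICE_DBG_SCHED, "RDMA teardown failed\n");
	if (ice_rm_vsi_lan_cfg(pi, vsi_handle))
		ice_debug(pi->hw, ICE_DBG_SCHED, "LAN teardown failed\n");
}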

/**
 * ice_get_agg_info - get the aggregator info
 * @hw: pointer to the hardware structure
 * @agg_id: aggregator ID
 *
 * This function validates the aggregator ID. It returns the aggregator info
 * if the aggregator ID is present in the list; otherwise it returns NULL.
 */
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;

	list_for_each_entry(agg_info, &hw->agg_list, list_entry)
		if (agg_info->agg_id == agg_id)
			return agg_info;

	return NULL;
}

/**
 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 * @hw: pointer to the HW struct
 * @node: pointer to a child node
 * @num_nodes: num nodes count array
 *
 * This function walks through the aggregator subtree to find a free parent
 * node
 */
struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
			      u16 *num_nodes)
{
	u8 l = node->tx_sched_layer;
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(hw);

	/* Is it the VSI parent layer? */
	if (l == vsil - 1)
		return (node->num_children < hw->max_children[l]) ? node : NULL;

	/* We have intermediate nodes. Let's walk through the subtree. If the
	 * intermediate node has space to add a new node then clear the count
	 */
	if (node->num_children < hw->max_children[l])
		num_nodes[l] = 0;
	/* The recursive call below is intentional and won't recurse more
	 * than two or three levels.
	 */
	for (i = 0; i < node->num_children; i++) {
		struct ice_sched_node *parent;

		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
						       num_nodes);
		if (parent)
			return parent;
	}

	return NULL;
}

/**
 * ice_sched_update_parent - update the new parent in SW DB
 * @new_parent: pointer to a new parent node
 * @node: pointer to a child node
 *
 * This function removes the child from the old parent and adds it to a new
 * parent
 */
void
ice_sched_update_parent(struct ice_sched_node *new_parent,
			struct ice_sched_node *node)
{
	struct ice_sched_node *old_parent;
	u8 i, j;

	old_parent = node->parent;

	/* update the old parent children */
	for (i = 0; i < old_parent->num_children; i++)
		if (old_parent->children[i] == node) {
			for (j = i + 1; j < old_parent->num_children; j++)
				old_parent->children[j - 1] =
					old_parent->children[j];
			old_parent->num_children--;
			break;
		}

	/* now move the node to a new parent */
	new_parent->children[new_parent->num_children++] = node;
	node->parent = new_parent;
	node->info.parent_teid = new_parent->info.node_teid;
}

/**
 * ice_sched_move_nodes - move child nodes to a given parent
 * @pi: port information structure
 * @parent: pointer to parent node
 * @num_items: number of child nodes to be moved
 * @list: pointer to child node TEIDs
 *
 * This function moves the child nodes to a given parent.
 */
int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
		     u16 num_items, u32 *list)
{
	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
	u16 buf_len = __struct_size(buf);
	struct ice_sched_node *node;
	u16 i, grps_movd = 0;
	struct ice_hw *hw;
	int status = 0;

	hw = pi->hw;

	if (!parent || !num_items)
		return -EINVAL;

	/* Does the parent have enough space? */
	if (parent->num_children + num_items >
	    hw->max_children[parent->tx_sched_layer])
		return -ENOSPC;

	for (i = 0; i < num_items; i++) {
		node = ice_sched_find_node_by_teid(pi->root, list[i]);
		if (!node) {
			status = -EINVAL;
			break;
		}

		buf->hdr.src_parent_teid = node->info.parent_teid;
		buf->hdr.dest_parent_teid = parent->info.node_teid;
		buf->teid[0] = node->info.node_teid;
		buf->hdr.num_elems = cpu_to_le16(1);
		status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
		if (status && grps_movd != 1) {
			status = -EIO;
			break;
		}

		/* update the SW DB */
		ice_sched_update_parent(parent, node);
	}

	return status;
}
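
/* Illustrative usage sketch (not part of the driver): moving a single node,
 * identified by its TEID, under a new parent with ice_sched_move_nodes().
 * Both TEIDs are hypothetical, and the caller is assumed to hold
 * pi->sched_lock, as the in-tree callers of this helper do.
 */
static int __maybe_unused
ice_sched_move_one_node(struct ice_port_info *pi, u32 teid, u32 parent_teid)
{
	struct ice_sched_node *parent;

	parent = ice_sched_find_node_by_teid(pi->root, parent_teid);
	if (!parent)
		return -ENOENT;

	return ice_sched_move_nodes(pi, parent, 1, &teid);
}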

/**
 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function moves a VSI to an aggregator node or its subtree.
 * Intermediate nodes may be created if required.
 */
static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	u16 num_nodes_added;
	u8 aggl, vsil, i;
	int status;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return -ENOENT;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return -ENOENT;

	/* Is this VSI already part of the given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return 0;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* add new nodes */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status || num_nodes[i] != num_nodes_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return -EIO;
	}

move_nodes:
	vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}

/**
 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: traffic class number
 * @rm_vsi_info: remove VSI info if true
 *
 * This function moves all the VSI(s) to the default aggregator and deletes
 * the aggregator VSI info based on the passed-in boolean parameter
 * rm_vsi_info. The caller holds the scheduler lock.
 */
static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
			     struct ice_sched_agg_info *agg_info, u8 tc,
			     bool rm_vsi_info)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_vsi_info *tmp;
	int status = 0;

	list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
				 list_entry) {
		u16 vsi_handle = agg_vsi_info->vsi_handle;

		/* Move VSI to default aggregator */
		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
			continue;

		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
						   ICE_DFLT_AGG_ID, tc);
		if (status)
			break;

		clear_bit(tc, agg_vsi_info->tc_bitmap);
		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
		}
	}

	return status;
}

/**
 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 * @pi: port information structure
 * @node: node pointer
 *
 * This function checks whether the aggregator is attached to any VSI or not.
 */
static bool
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
{
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	if (node->tx_sched_layer < vsil - 1) {
		for (i = 0; i < node->num_children; i++)
			if (ice_sched_is_agg_inuse(pi, node->children[i]))
				return true;
		return false;
	} else {
		return node->num_children ? true : false;
	}
}

/**
 * ice_sched_rm_agg_cfg - remove the aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function removes the aggregator node and intermediate nodes, if any,
 * from the given TC
 */
static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return -ENOENT;

	/* Can't remove the aggregator node if it has children */
	if (ice_sched_is_agg_inuse(pi, agg_node))
		return -EBUSY;

	/* need to remove the whole subtree if the aggregator node is the
	 * only child.
	 */
	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
		struct ice_sched_node *parent = agg_node->parent;

		if (!parent)
			return -EIO;

		if (parent->num_children > 1)
			break;

		agg_node = parent;
	}

	ice_free_sched_node(pi, agg_node);
	return 0;
}

/**
 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: TC number
 * @rm_vsi_info: remove VSI info if true
 *
 * This function removes the aggregator's references to VSIs of the given TC.
 * It removes the aggregator configuration completely for the requested TC.
 * The caller needs to hold the scheduler lock.
 */
static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
		  u8 tc, bool rm_vsi_info)
{
	int status = 0;

	/* If nothing to remove - return success */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		goto exit_rm_agg_cfg_tc;

	status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
	if (status)
		goto exit_rm_agg_cfg_tc;

	/* Delete aggregator node(s) */
	status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
	if (status)
		goto exit_rm_agg_cfg_tc;

	clear_bit(tc, agg_info->tc_bitmap);
exit_rm_agg_cfg_tc:
	return status;
}

/**
 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc_bitmap: 8 bits TC bitmap
 *
 * Save aggregator TC bitmap. This function needs to be called with scheduler
 * lock held.
 */
static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
		       unsigned long *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return -EINVAL;
	bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
		    ICE_MAX_TRAFFIC_CLASS);
	return 0;
}

/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC
 */
static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	int status = 0;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does the aggregator node already exist? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return -EIO;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status || num_nodes[i] != num_nodes_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}

/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track its resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
 * resources and remove aggregator ID.
 * This function needs to be called with scheduler lock held.
 */
static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, unsigned long *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_hw *hw = pi->hw;
	int status = 0;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
					GFP_KERNEL);
		if (!agg_info)
			return -ENOMEM;

		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list */
		list_add(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		set_bit(tc, agg_info->tc_bitmap);
	}

	return status;
}

/**
 * ice_cfg_agg - config aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * This function configures aggregator node(s).
 */
int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
	    u8 tc_bitmap)
{
	unsigned long bitmap = tc_bitmap;
	int status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
	if (!status)
		status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
	mutex_unlock(&pi->sched_lock);
	return status;
}
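
/* Illustrative usage sketch (not part of the driver): registering an
 * aggregator group for TCs 0 and 1 with ice_cfg_agg(). The aggregator ID
 * is made up; ice_cfg_agg() takes the scheduler lock itself.
 */
static int __maybe_unused ice_cfg_agg_example(struct ice_port_info *pi)
{
	u8 tc_bitmap = BIT(0) | BIT(1);	/* enable TCs 0 and 1 */

	/* 10 is a hypothetical caller-chosen aggregator ID */
	return ice_cfg_agg(pi, 10, ICE_AGG_TYPE_AGG, tc_bitmap);
}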

/**
 * ice_get_agg_vsi_info - get the aggregator VSI info
 * @agg_info: aggregator info
 * @vsi_handle: software VSI handle
 *
 * The function returns the aggregator VSI info based on the VSI handle. This
 * function needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;

	list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
		if (agg_vsi_info->vsi_handle == vsi_handle)
			return agg_vsi_info;

	return NULL;
}

/**
 * ice_get_vsi_agg_info - get the aggregator info of VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * The function returns the aggregator info of the VSI represented by
 * vsi_handle; in this case the VSI has an aggregator other than the default
 * one. This function needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;

	list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;

		agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
		if (agg_vsi_info)
			return agg_info;
	}
	return NULL;
}

/**
 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Save VSI to aggregator TC bitmap. This function needs to be called with
 * the scheduler lock held.
 */
static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
			   unsigned long *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return -EINVAL;
	/* check if entry already exists */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info)
		return -EINVAL;
	bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
		    ICE_MAX_TRAFFIC_CLASS);
	return 0;
}

/**
 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * This function moves VSI to a new or default aggregator node. If VSI is
 * already associated to the aggregator node then no operation is performed on
 * the tree. This function needs to be called with scheduler lock held.
 */
static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			   u16 vsi_handle, unsigned long *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
	struct ice_sched_agg_info *agg_info, *old_agg_info;
	struct ice_hw *hw = pi->hw;
	int status = 0;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;
	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info)
		return -EINVAL;
	/* If the VSI is already part of another aggregator then update
	 * its VSI info list
	 */
	old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
	if (old_agg_info && old_agg_info != agg_info) {
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(iter, vtmp,
					 &old_agg_info->agg_vsi_list,
					 list_entry)
			if (iter->vsi_handle == vsi_handle) {
				old_agg_vsi_info = iter;
				break;
			}
	}

	/* check if entry already exists */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info) {
		/* Create new entry for VSI under aggregator list */
		agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
					    sizeof(*agg_vsi_info), GFP_KERNEL);
		if (!agg_vsi_info)
			return -ENOMEM;

		/* add VSI ID into the aggregator list */
		agg_vsi_info->vsi_handle = vsi_handle;
		list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
	}
	/* Move VSI node to new aggregator node for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc))
			continue;

		/* Move VSI to new aggregator */
		status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
		if (status)
			break;

		set_bit(tc, agg_vsi_info->tc_bitmap);
		if (old_agg_vsi_info)
			clear_bit(tc, old_agg_vsi_info->tc_bitmap);
	}
	if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
		list_del(&old_agg_vsi_info->list_entry);
		devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
	}
	return status;
}

/**
 * ice_sched_rm_unused_rl_prof - remove unused RL profile
 * @pi: port information structure
 *
 * This function removes unused rate limit profiles from the HW and
 * SW DB. The caller needs to hold scheduler lock.
 */
static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
				ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
		}
	}
}

/**
 * ice_sched_update_elem - update element
 * @hw: pointer to the HW struct
 * @node: pointer to node
 * @info: node info to update
 *
 * Update the HW DB and the local SW DB of the node. The scheduling
 * parameters of the node are updated from the info data buffer (info->data),
 * and success or an error is returned if configuring the sched element
 * fails. The caller needs to hold scheduler lock.
 */
static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
		      struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data buf;
	u16 elem_cfgd = 0;
	u16 num_elems = 1;
	int status;

	buf = *info;
	/* Parent TEID is a reserved field in this AQ call */
	buf.parent_teid = 0;
	/* Element type is a reserved field in this AQ call */
	buf.data.elem_type = 0;
	/* Flags is a reserved field in this AQ call */
	buf.data.flags = 0;

	/* Update HW DB */
	/* Configure element node */
	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
					&elem_cfgd, NULL);
	if (status || elem_cfgd != num_elems) {
		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
		return -EIO;
	}

	/* Config success case */
	/* Now update local SW DB */
	/* Only copy the data portion of info buffer */
	node->info.data = info->data;
	return status;
}

/**
 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @bw_alloc: BW weight/allocation
 *
 * This function configures node element's BW allocation.
 */
static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
			    enum ice_rl_type rl_type, u16 bw_alloc)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;
	if (rl_type == ICE_MIN_BW) {
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
	} else if (rl_type == ICE_MAX_BW) {
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
	} else {
		return -EINVAL;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

/**
 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Move or associate VSI to a new or default aggregator node.
 */
int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
		    u8 tc_bitmap)
{
	unsigned long bitmap = tc_bitmap;
	int status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
					    (unsigned long *)&bitmap);
	if (!status)
		status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
						    (unsigned long *)&bitmap);
	mutex_unlock(&pi->sched_lock);
	return status;
}
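
/* Illustrative usage sketch (not part of the driver): attaching a VSI to
 * the aggregator registered in the earlier sketch, for the same TCs. The
 * aggregator ID is the same made-up value; ice_move_vsi_to_agg() takes the
 * scheduler lock itself.
 */
static int __maybe_unused
ice_move_vsi_to_agg_example(struct ice_port_info *pi, u16 vsi_handle)
{
	u8 tc_bitmap = BIT(0) | BIT(1);	/* TCs the aggregator was created on */

	return ice_move_vsi_to_agg(pi, 10, vsi_handle, tc_bitmap);
}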

/**
 * ice_set_clear_cir_bw - set or clear CIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
 */
static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
		bw_t_info->cir_bw.bw = 0;
	} else {
		/* Save type of BW information */
		set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
		bw_t_info->cir_bw.bw = bw;
	}
}

/**
 * ice_set_clear_eir_bw - set or clear EIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
 */
static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = 0;
	} else {
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element.
		 * First clear earlier saved shared BW information.
		 */
		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = 0;
		/* save EIR BW information */
		set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = bw;
	}
}

/**
 * ice_set_clear_shared_bw - set or clear shared BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
 */
static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = 0;
	} else {
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element.
		 * First clear earlier saved EIR BW information.
		 */
		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = 0;
		/* save shared BW information */
		set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = bw;
	}
}
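
/* Illustrative sketch (not part of the driver) of the EIR/shared
 * exclusivity enforced by the two helpers above: saving an EIR BW clears
 * any saved shared BW and vice versa, so only the most recent of the two
 * survives in bw_t_info. The BW values are made up.
 */
static void __maybe_unused
ice_set_clear_bw_example(struct ice_bw_type_info *bw_t_info)
{
	ice_set_clear_eir_bw(bw_t_info, 100000);	/* save 100 Mbps EIR */
	ice_set_clear_shared_bw(bw_t_info, 50000);	/* EIR is cleared here */
	/* passing ICE_SCHED_DFLT_BW clears the saved shared BW again */
	ice_set_clear_shared_bw(bw_t_info, ICE_SCHED_DFLT_BW);
}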

/**
 * ice_sched_save_vsi_bw - save VSI node's BW information
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of VSI type node for post replay use.
 */
static int
ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      enum ice_rl_type rl_type, u32 bw)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of RL profile.
 */
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
	wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate the fractional value up to 4 decimal places.
		 * Convert the integer value to a constant multiplier.
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
				      hw->psm_clk_freq, bytes_per_sec);

		/* Get the fractional value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the fractional value via ceil(fractional value) */
		if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
					       ICE_RL_PROF_MULTIPLIER);
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}
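
/* Worked example for the wakeup calculation above (illustrative numbers
 * only, assuming a PSM clock of ~446 MHz, i.e. 446428571 Hz): for
 * bw = 10000 Kbps, bytes_per_sec = 10000 * 1000 / 8 = 1250000, and
 * wakeup_int = 446428571 / 1250000 = 357. Since 357 > 63, the integer form
 * is used and the result is (1 << 15) | 357. Slower clocks or higher rates
 * that yield wakeup_int <= 63 take the fractional path instead.
 */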

/**
 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 * @profile: profile parameters to return
 *
 * This function converts the BW to profile structure format.
 */
static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	s64 bytes_per_sec, ts_rate, mv_tmp;
	int status = -EINVAL;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* BW settings range from 0.5 Mbps to 100 Gbps */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);

	/* encode is 6 bits, but only 5 bits are really useful */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		ts_rate = div64_long((s64)hw->psm_clk_freq,
				     pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				    ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = cpu_to_le16(mv);
		profile->wake_up_calc = cpu_to_le16(wm);
		profile->rl_encode = cpu_to_le16(encode);
		status = 0;
	} else {
		status = -ENOENT;
	}

	return status;
}
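
/* Sketch of the search above (illustrative reading, not a spec): each
 * candidate encode i halves the timeslice rate ts_rate =
 * psm_clk_freq / (2^i * ICE_RL_PROF_TS_MULTIPLIER), and the loop picks the
 * first i whose scaled multiplier mv = bytes_per_sec *
 * ICE_RL_PROF_MULTIPLIER / ts_rate exceeds ICE_RL_PROF_ACCURACY_BYTES.
 * That i is programmed as rl_encode and mv as rl_multiply, so the pair
 * approximates the requested byte rate within the profile's accuracy.
 */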

/**
 * ice_sched_add_rl_profile - add RL profile
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for a corresponding BW
 * parameter. If it exists, the associated profile is returned; otherwise a
 * new rate limit profile is created for the requested BW, added to the HW DB
 * and the local list, and returned. NULL is returned on error.
 * The caller needs to hold the scheduler lock.
 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
			 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	struct ice_hw *hw;
	u8 profile_type;
	int status;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return NULL;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	if (!pi)
		return NULL;
	hw = pi->hw;
	list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
				    GFP_KERNEL);
	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero-based; FW expects levels from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
	return NULL;
}

/**
 * ice_sched_cfg_node_bw_lmt - configure node sched params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @rl_prof_id: rate limit profile ID
 *
 * This function configures node element's BW limit.
 */
static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			return -EIO;
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		/* Check for removing shared BW */
		if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
			/* remove shared profile */
			data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
			data->srl_id = 0; /* clear SRL field */

			/* enable back EIR to default profile */
			data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
			data->eir_bw.bw_profile_idx =
				cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
			break;
		}
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
		    (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
			    ICE_SCHED_DFLT_RL_PROF_ID))
			return -EIO;
		/* EIR BW is set to default, disable it */
		data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
		/* Okay to enable shared BW now */
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = cpu_to_le16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return -EINVAL;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

/**
 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 * @node: sched node
 * @rl_type: rate limit type
 *
 * If an existing profile matches, it returns the corresponding rate
 * limit profile ID; otherwise it returns an invalid ID as an error.
 */
static u16
ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
			      enum ice_rl_type rl_type)
{
	u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
	struct ice_aqc_txsched_elem *data;

	data = &node->info.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
			rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
		break;
	case ICE_MAX_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
			rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
		break;
	case ICE_SHARED_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			rl_prof_id = le16_to_cpu(data->srl_id);
		break;
	default:
		break;
	}

	return rl_prof_id;
}

/**
 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @layer_index: layer index
 *
 * This function returns requested profile creation layer.
 */
static u8
ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
			    u8 layer_index)
{
	struct ice_hw *hw = pi->hw;

	if (layer_index >= hw->num_tx_sched_layers)
		return ICE_SCHED_INVAL_LAYER_NUM;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (hw->layer_info[layer_index].max_cir_rl_profiles)
			return layer_index;
		break;
	case ICE_MAX_BW:
		if (hw->layer_info[layer_index].max_eir_rl_profiles)
			return layer_index;
		break;
	case ICE_SHARED_BW:
		/* if current layer doesn't support SRL profile creation
		 * then try a layer up or down.
		 */
		if (hw->layer_info[layer_index].max_srl_profiles)
			return layer_index;
		else if (layer_index < hw->num_tx_sched_layers - 1 &&
			 hw->layer_info[layer_index + 1].max_srl_profiles)
			return layer_index + 1;
		else if (layer_index > 0 &&
			 hw->layer_info[layer_index - 1].max_srl_profiles)
			return layer_index - 1;
		break;
	default:
		break;
	}
	return ICE_SCHED_INVAL_LAYER_NUM;
}

/**
 * ice_sched_get_srl_node - get shared rate limit node
 * @node: tree node
 * @srl_layer: shared rate limit layer
 *
 * This function returns SRL node to be used for shared rate limit purpose.
 * The caller needs to hold scheduler lock.
 */
static struct ice_sched_node *
ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
{
	if (srl_layer > node->tx_sched_layer)
		return node->children[0];
	else if (srl_layer < node->tx_sched_layer)
		/* Node can't be created without a parent. It will always
		 * have a valid parent except the root node.
		 */
		return node->parent;
	else
		return node;
}

/**
 * ice_sched_rm_rl_profile - remove RL profile ID
 * @pi: port information structure
 * @layer_num: layer number where profiles are saved
 * @profile_type: profile type like EIR, CIR, or SRL
 * @profile_id: profile ID to remove
 *
 * This function removes rate limit profile from layer 'layer_num' of type
 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
 * scheduler lock.
 */
static int
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	int status = 0;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return -EINVAL;
	/* Check the existing list for RL profile */
	list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    le16_to_cpu(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
			if (status && status != -EBUSY)
				ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	if (status == -EBUSY)
		status = 0;
	return status;
}

/**
 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 * @layer_num: layer number where RL profiles are saved
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
 */
static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   enum ice_rl_type rl_type, u8 layer_num)
{
	struct ice_hw *hw;
	u8 profile_type;
	u16 rl_prof_id;
	u16 old_id;
	int status;

	hw = pi->hw;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		/* No SRL is configured for default case */
		rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
		break;
	default:
		return -EINVAL;
	}
	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* Remove stale RL profile ID */
	if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
	    old_id == ICE_SCHED_INVAL_PROF_ID)
		return 0;

	return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
}

/**
 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
 * @pi: port information structure
 * @node: pointer to node structure
 * @layer_num: layer number where rate limit profiles are saved
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth value
 *
 * This function prepares node element's bandwidth to SRL or EIR exclusively.
 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
 * them may be set for any given element. This function needs to be called
 * with the scheduler lock held.
 */
static int
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   u8 layer_num, enum ice_rl_type rl_type, u32 bw)
{
	if (rl_type == ICE_SHARED_BW) {
		/* SRL node passed in this case, it may be different node */
		if (bw == ICE_SCHED_DFLT_BW)
			/* SRL being removed, ice_sched_cfg_node_bw_lmt()
			 * enables EIR to default. EIR is not set in this
			 * case, so no additional action is required.
			 */
			return 0;

		/* SRL being configured, set EIR to default here.
		 * ice_sched_cfg_node_bw_lmt() disables EIR when it
		 * configures SRL
		 */
		return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
						  layer_num);
	} else if (rl_type == ICE_MAX_BW &&
		   node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
		/* Remove Shared profile. Set default shared BW call
		 * removes shared profile for a node.
		 */
		return ice_sched_set_node_bw_dflt(pi, node,
						  ICE_SHARED_BW,
						  layer_num);
	}
	return 0;
}

/**
 * ice_sched_set_node_bw - set node's bandwidth
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: layer number
 *
 * This function adds a new profile corresponding to the requested BW,
 * configures the node's RL profile ID of type CIR, EIR, or SRL, and removes
 * the old profile ID from the local database. The caller needs to hold the
 * scheduler lock.
 */
int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;
	int status = -EINVAL;

	rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes have been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return 0;

	return ice_sched_rm_rl_profile(pi, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}

/**
 * ice_sched_set_node_priority - set node's priority
 * @pi: port information structure
 * @node: tree node
 * @priority: number 0-7 representing priority among siblings
 *
 * This function sets the priority of a node among its siblings.
 */
int
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
			    u16 priority)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;

	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
	data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority);

	return ice_sched_update_elem(pi->hw, node, &buf);
}

/**
 * ice_sched_set_node_weight - set node's weight
 * @pi: port information structure
 * @node: tree node
 * @weight: number 1-200 representing weight for WFQ
 *
 * This function sets weight of the node for WFQ algorithm.
 */
int
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;

	data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
			       ICE_AQC_ELEM_VALID_GENERIC;
	data->cir_bw.bw_alloc = cpu_to_le16(weight);
	data->eir_bw.bw_alloc = cpu_to_le16(weight);

	data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0);

	return ice_sched_update_elem(pi->hw, node, &buf);
}
3670
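/* Usage sketch (illustrative only, not part of the driver): give one node
 * strict priority 0 among its siblings and assign a WFQ weight of 50 to
 * another. node_a/node_b are placeholders for nodes looked up elsewhere,
 * and, as with the other node-level helpers in this file, the caller is
 * assumed to hold pi->sched_lock.
 *
 *	status = ice_sched_set_node_priority(pi, node_a, 0);
 *	if (!status)
 *		status = ice_sched_set_node_weight(pi, node_b, 50);
 */
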
/**
 * ice_sched_set_node_bw_lmt - set node's BW limit
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * This function updates the node's BW limit parameters, such as the BW RL
 * profile ID of type CIR, EIR, or SRL. The caller needs to hold the
 * scheduler lock.
 */
int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *cfg_node = node;
	struct ice_hw *hw;
	u8 layer_num;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi);
	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
						node->tx_sched_layer);
	if (layer_num >= hw->num_tx_sched_layers)
		return -EINVAL;

	if (rl_type == ICE_SHARED_BW) {
		/* SRL node may be different */
		cfg_node = ice_sched_get_srl_node(node, layer_num);
		if (!cfg_node)
			return -EIO;
	}
	/* EIR BW and Shared BW profiles are mutually exclusive and
	 * hence only one of them may be set for any given element
	 */
	status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
					    bw);
	if (status)
		return status;
	if (bw == ICE_SCHED_DFLT_BW)
		return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
						  layer_num);
	return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
}

/**
 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
 */
static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
			       struct ice_sched_node *node,
			       enum ice_rl_type rl_type)
{
	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
					 ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_validate_srl_node - Check node for SRL applicability
 * @node: sched node to configure
 * @sel_layer: selected SRL layer
 *
 * This function checks if the SRL can be applied to a selected layer node on
 * behalf of the requested node (first argument). This function needs to be
 * called with the scheduler lock held.
 */
static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
	/* SRL profiles are not available on all layers. Check if the
	 * SRL profile can be applied to a node above or below the
	 * requested node. SRL configuration is possible only if the
	 * selected layer's node has a single child.
	 */
	if (sel_layer == node->tx_sched_layer ||
	    ((sel_layer == node->tx_sched_layer + 1) &&
	    node->num_children == 1) ||
	    ((sel_layer == node->tx_sched_layer - 1) &&
	    (node->parent && node->parent->num_children == 1)))
		return 0;

	return -EIO;
}

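/* Worked example (illustrative): for a @node on layer 5, @sel_layer 5
 * always passes; @sel_layer 6 (one layer below) passes only when the node
 * itself has exactly one child; @sel_layer 4 (one layer above) passes only
 * when node->parent exists and has exactly one child. Anything else is
 * rejected with -EIO.
 */
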
/**
 * ice_sched_save_q_bw - save queue node's BW information
 * @q_ctx: queue context structure
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of queue type node for post replay use.
 */
static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_sched_set_q_bw_lmt - sets queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of queue scheduling node.
 */
static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;
	int status = -EINVAL;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;
	mutex_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = -EINVAL;
			goto exit_q_bw_lmt;
		}
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_q_bw_lmt - configure queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of queue scheduling node.
 */
int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
				      bw);
}

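/* Usage sketch (illustrative only): cap a LAN queue at 100 Mbps. @bw is in
 * Kbps, so 100 Mbps is 100000. vsi_handle and q_handle are placeholders;
 * the queue must already have been added through the normal Tx queue setup
 * path so that its queue context and TEID exist.
 *
 *	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle,
 *				  ICE_MAX_BW, 100000);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_SCHED, "q rate limit failed\n");
 */
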
/**
 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 *
 * This function configures BW default limit of queue scheduling node.
 */
int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 q_handle, enum ice_rl_type rl_type)
{
	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
				      ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_get_node_by_id_type - get node from ID type
 * @pi: port information structure
 * @id: identifier
 * @agg_type: type of aggregator
 * @tc: traffic class
 *
 * This function returns the node identified by @id and @agg_type, based on
 * the traffic class (TC). This function needs to be called with the
 * scheduler lock held.
 */
static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
			      enum ice_agg_type agg_type, u8 tc)
{
	struct ice_sched_node *node = NULL;

	switch (agg_type) {
	case ICE_AGG_TYPE_VSI: {
		struct ice_vsi_ctx *vsi_ctx;
		u16 vsi_handle = (u16)id;

		if (!ice_is_vsi_valid(pi->hw, vsi_handle))
			break;
		/* Get sched_vsi_info */
		vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
		if (!vsi_ctx)
			break;
		node = vsi_ctx->sched.vsi_node[tc];
		break;
	}

	case ICE_AGG_TYPE_AGG: {
		struct ice_sched_node *tc_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (tc_node)
			node = ice_sched_get_agg_node(pi, tc_node, id);
		break;
	}

	default:
		break;
	}

	return node;
}

/**
 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
 * @pi: port information structure
 * @id: ID (software VSI handle or AGG ID)
 * @agg_type: aggregator type (VSI or AGG type node)
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function sets the BW limit of a VSI or aggregator scheduling node
 * for the given TC, using the passed-in BW value.
 */
static int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
				 enum ice_agg_type agg_type, u8 tc,
				 enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *node;
	int status = -EINVAL;

	if (!pi)
		return status;

	if (rl_type == ICE_UNKNOWN_BW)
		return status;

	mutex_lock(&pi->sched_lock);
	node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
		goto exit_set_node_bw_lmt_per_tc;
	}
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

exit_set_node_bw_lmt_per_tc:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of VSI scheduling node based on TC
 * information.
 */
int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	int status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type, bw);
	if (!status) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
		mutex_unlock(&pi->sched_lock);
	}
	return status;
}

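/* Usage sketch (illustrative only): guarantee a VSI 500 Mbps of committed
 * bandwidth on TC 0 by programming a minimum (CIR) limit. vsi_handle is a
 * placeholder for a valid software VSI handle.
 *
 *	status = ice_cfg_vsi_bw_lmt_per_tc(pi, vsi_handle, 0,
 *					   ICE_MIN_BW, 500000);
 */
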
/**
 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function configures default BW limit of VSI scheduling node based on TC
 * information.
 */
int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			       enum ice_rl_type rl_type)
{
	int status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		mutex_unlock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_rl_burst_size - Set burst size value
 * @hw: pointer to the HW struct
 * @bytes: burst size in bytes
 *
 * This function configures/sets the burst size to the requested new value.
 * The new burst size value is used for future rate limit calls. It doesn't
 * change the existing or previously created RL profiles.
 */
int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
	u16 burst_size_to_prog;

	if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
	    bytes > ICE_MAX_BURST_SIZE_ALLOWED)
		return -EINVAL;
	if (ice_round_to_num(bytes, 64) <=
	    ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
		/* 64 byte granularity case */
		/* Disable MSB granularity bit */
		burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
		/* round number to nearest 64 byte granularity */
		bytes = ice_round_to_num(bytes, 64);
		/* The value is in 64 byte chunks */
		burst_size_to_prog |= (u16)(bytes / 64);
	} else {
		/* k bytes granularity case */
		/* Enable MSB granularity bit */
		burst_size_to_prog = ICE_KBYTE_GRANULARITY;
		/* round number to nearest 1024 granularity */
		bytes = ice_round_to_num(bytes, 1024);
		/* check rounding doesn't go beyond allowed */
		if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
			bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
		/* The value is in k bytes */
		burst_size_to_prog |= (u16)(bytes / 1024);
	}
	hw->max_burst_size = burst_size_to_prog;
	return 0;
}

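/* Worked example (illustrative): a request of 2000 bytes rounds to the
 * nearest multiple of 64, i.e. 1984, and, assuming it fits within
 * ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY, is programmed as
 * ICE_64_BYTE_GRANULARITY | (1984 / 64). A larger request of 1000000 bytes
 * instead rounds to the nearest multiple of 1024, i.e. 1000448, is clamped
 * to ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY if needed, and is programmed as
 * ICE_KBYTE_GRANULARITY | (bytes / 1024).
 */
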
/**
 * ice_sched_replay_node_prio - re-configure node priority
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @priority: priority value
 *
 * This function configures node element's priority value. It
 * needs to be called with scheduler lock held.
 */
static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
			   u8 priority)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;
	int status;

	buf = node->info;
	data = &buf.data;
	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
	data->generic = priority;

	/* Configure element */
	status = ice_sched_update_elem(hw, node, &buf);
	return status;
}

/**
 * ice_sched_replay_node_bw - replay node(s) BW
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @bw_t_info: BW type information
 *
 * This function restores node's BW from bw_t_info. The caller needs
 * to hold the scheduler lock.
 */
static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
			 struct ice_bw_type_info *bw_t_info)
{
	struct ice_port_info *pi = hw->port_info;
	int status = -EINVAL;
	u16 bw_alloc;

	if (!node)
		return status;
	if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
		return 0;
	if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
		status = ice_sched_replay_node_prio(hw, node,
						    bw_t_info->generic);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
						   bw_t_info->cir_bw.bw);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
		bw_alloc = bw_t_info->cir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
						   bw_t_info->eir_bw.bw);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
		bw_alloc = bw_t_info->eir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
						   bw_t_info->shared_bw);
	return status;
}

/**
 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
 * @pi: port info struct
 * @tc_bitmap: 8 bits TC bitmap to check
 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
 *
 * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TCs
 * may be missing after a reset; only the currently enabled TCs are
 * returned. This function needs to be called with the scheduler lock held.
 */
static void
ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
			    unsigned long *tc_bitmap,
			    unsigned long *ena_tc_bitmap)
{
	u8 tc;

	/* Some TC(s) may be missing after reset, adjust for replay */
	ice_for_each_traffic_class(tc)
		if (ice_is_tc_ena(*tc_bitmap, tc) &&
		    (ice_sched_get_tc_node(pi, tc)))
			set_bit(tc, ena_tc_bitmap);
}

/**
 * ice_sched_replay_agg - recreate aggregator node(s)
 * @hw: pointer to the HW struct
 *
 * This function recreates aggregator type nodes that were not replayed
 * earlier. It also replays aggregator BW information. These aggregator
 * nodes are not yet associated with a VSI type node.
 */
void ice_sched_replay_agg(struct ice_hw *hw)
{
	struct ice_port_info *pi = hw->port_info;
	struct ice_sched_agg_info *agg_info;

	mutex_lock(&pi->sched_lock);
	list_for_each_entry(agg_info, &hw->agg_list, list_entry)
		/* replay aggregator (re-create aggregator node) */
		if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
				  ICE_MAX_TRAFFIC_CLASS)) {
			DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
			int status;

			bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
			ice_sched_get_ena_tc_bitmap(pi,
						    agg_info->replay_tc_bitmap,
						    replay_bitmap);
			status = ice_sched_cfg_agg(hw->port_info,
						   agg_info->agg_id,
						   ICE_AGG_TYPE_AGG,
						   replay_bitmap);
			if (status) {
				dev_info(ice_hw_to_dev(hw),
					 "Replay agg id[%d] failed\n",
					 agg_info->agg_id);
				/* Move on to next one */
				continue;
			}
		}
	mutex_unlock(&pi->sched_lock);
}

/**
 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
 * @hw: pointer to the HW struct
 *
 * This function initializes the aggregator(s) TC bitmap to zero, a required
 * preinit step for replaying aggregators.
 */
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
{
	struct ice_port_info *pi = hw->port_info;
	struct ice_sched_agg_info *agg_info;

	mutex_lock(&pi->sched_lock);
	list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;

		agg_info->tc_bitmap[0] = 0;
		list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
				    list_entry)
			agg_vsi_info->tc_bitmap[0] = 0;
	}
	mutex_unlock(&pi->sched_lock);
}

/**
 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 *
 * This function replays the aggregator node, VSI to aggregator type nodes,
 * and their node bandwidth information. This function needs to be called
 * with the scheduler lock held.
 */
static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
	DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_port_info *pi = hw->port_info;
	struct ice_sched_agg_info *agg_info;
	int status;

	bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
	if (!agg_info)
		return 0; /* Not present in list - default Agg case */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info)
		return 0; /* Not present in list - default Agg case */
	ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
				    replay_bitmap);
	/* Replay the aggregator node associated with vsi_handle */
	status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
				   ICE_AGG_TYPE_AGG, replay_bitmap);
	if (status)
		return status;

	bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
	ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
				    replay_bitmap);
	/* Move this VSI (vsi_handle) to the above aggregator */
	return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
					  replay_bitmap);
}

/**
 * ice_replay_vsi_agg - replay VSI to aggregator node
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 *
 * This function replays association of VSI to aggregator type nodes, and
 * node bandwidth information.
 */
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_port_info *pi = hw->port_info;
	int status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_replay_vsi_agg(hw, vsi_handle);
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_replay_q_bw - replay queue type node BW
 * @pi: port information structure
 * @q_ctx: queue context structure
 *
 * This function replays queue type node bandwidth. This function needs to be
 * called with scheduler lock held.
 */
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
	struct ice_sched_node *q_node;

	/* Following also checks the presence of node in tree */
	q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!q_node)
		return -EINVAL;
	return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
}
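
/* Usage sketch (illustrative only): replay a queue's saved BW after a
 * reset. q_ctx is a placeholder for the queue context that was saved
 * before the reset; the scheduler lock must be held across the call, as
 * the replay paths in this file do.
 *
 *	mutex_lock(&pi->sched_lock);
 *	status = ice_sched_replay_q_bw(pi, q_ctx);
 *	mutex_unlock(&pi->sched_lock);
 */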
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_sched.h"
5
6/**
7 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
8 * @pi: port information structure
9 * @info: Scheduler element information from firmware
10 *
11 * This function inserts the root node of the scheduling tree topology
12 * to the SW DB.
13 */
14static enum ice_status
15ice_sched_add_root_node(struct ice_port_info *pi,
16 struct ice_aqc_txsched_elem_data *info)
17{
18 struct ice_sched_node *root;
19 struct ice_hw *hw;
20
21 if (!pi)
22 return ICE_ERR_PARAM;
23
24 hw = pi->hw;
25
26 root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
27 if (!root)
28 return ICE_ERR_NO_MEMORY;
29
30 /* coverity[suspicious_sizeof] */
31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
32 sizeof(*root), GFP_KERNEL);
33 if (!root->children) {
34 devm_kfree(ice_hw_to_dev(hw), root);
35 return ICE_ERR_NO_MEMORY;
36 }
37
38 memcpy(&root->info, info, sizeof(*info));
39 pi->root = root;
40 return 0;
41}
42
43/**
44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
46 * @teid: node TEID to search
47 *
48 * This function searches for a node matching the TEID in the scheduling tree
49 * from the SW DB. The search is recursive and is restricted by the number of
50 * layers it has searched through; stopping at the max supported layer.
51 *
52 * This function needs to be called when holding the port_info->sched_lock
53 */
54struct ice_sched_node *
55ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
56{
57 u16 i;
58
59 /* The TEID is same as that of the start_node */
60 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
61 return start_node;
62
63 /* The node has no children or is at the max layer */
64 if (!start_node->num_children ||
65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
67 return NULL;
68
69 /* Check if TEID matches to any of the children nodes */
70 for (i = 0; i < start_node->num_children; i++)
71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
72 return start_node->children[i];
73
74 /* Search within each child's sub-tree */
75 for (i = 0; i < start_node->num_children; i++) {
76 struct ice_sched_node *tmp;
77
78 tmp = ice_sched_find_node_by_teid(start_node->children[i],
79 teid);
80 if (tmp)
81 return tmp;
82 }
83
84 return NULL;
85}
86
87/**
88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
89 * @hw: pointer to the HW struct
90 * @cmd_opc: cmd opcode
91 * @elems_req: number of elements to request
92 * @buf: pointer to buffer
93 * @buf_size: buffer size in bytes
94 * @elems_resp: returns total number of elements response
95 * @cd: pointer to command details structure or NULL
96 *
97 * This function sends a scheduling elements cmd (cmd_opc)
98 */
99static enum ice_status
100ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
101 u16 elems_req, void *buf, u16 buf_size,
102 u16 *elems_resp, struct ice_sq_cd *cd)
103{
104 struct ice_aqc_sched_elem_cmd *cmd;
105 struct ice_aq_desc desc;
106 enum ice_status status;
107
108 cmd = &desc.params.sched_elem_cmd;
109 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
110 cmd->num_elem_req = cpu_to_le16(elems_req);
111 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 if (!status && elems_resp)
114 *elems_resp = le16_to_cpu(cmd->num_elem_resp);
115
116 return status;
117}
118
119/**
120 * ice_aq_query_sched_elems - query scheduler elements
121 * @hw: pointer to the HW struct
122 * @elems_req: number of elements to query
123 * @buf: pointer to buffer
124 * @buf_size: buffer size in bytes
125 * @elems_ret: returns total number of elements returned
126 * @cd: pointer to command details structure or NULL
127 *
128 * Query scheduling elements (0x0404)
129 */
130enum ice_status
131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
133 u16 *elems_ret, struct ice_sq_cd *cd)
134{
135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
136 elems_req, (void *)buf, buf_size,
137 elems_ret, cd);
138}
139
140/**
141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
142 * @pi: port information structure
143 * @layer: Scheduler layer of the node
144 * @info: Scheduler element information from firmware
145 *
146 * This function inserts a scheduler node to the SW DB.
147 */
148enum ice_status
149ice_sched_add_node(struct ice_port_info *pi, u8 layer,
150 struct ice_aqc_txsched_elem_data *info)
151{
152 struct ice_aqc_txsched_elem_data elem;
153 struct ice_sched_node *parent;
154 struct ice_sched_node *node;
155 enum ice_status status;
156 struct ice_hw *hw;
157
158 if (!pi)
159 return ICE_ERR_PARAM;
160
161 hw = pi->hw;
162
163 /* A valid parent node should be there */
164 parent = ice_sched_find_node_by_teid(pi->root,
165 le32_to_cpu(info->parent_teid));
166 if (!parent) {
167 ice_debug(hw, ICE_DBG_SCHED,
168 "Parent Node not found for parent_teid=0x%x\n",
169 le32_to_cpu(info->parent_teid));
170 return ICE_ERR_PARAM;
171 }
172
173 /* query the current node information from FW before adding it
174 * to the SW DB
175 */
176 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
177 if (status)
178 return status;
179
180 node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
181 if (!node)
182 return ICE_ERR_NO_MEMORY;
183 if (hw->max_children[layer]) {
184 /* coverity[suspicious_sizeof] */
185 node->children = devm_kcalloc(ice_hw_to_dev(hw),
186 hw->max_children[layer],
187 sizeof(*node), GFP_KERNEL);
188 if (!node->children) {
189 devm_kfree(ice_hw_to_dev(hw), node);
190 return ICE_ERR_NO_MEMORY;
191 }
192 }
193
194 node->in_use = true;
195 node->parent = parent;
196 node->tx_sched_layer = layer;
197 parent->children[parent->num_children++] = node;
198 node->info = elem;
199 return 0;
200}
201
202/**
203 * ice_aq_delete_sched_elems - delete scheduler elements
204 * @hw: pointer to the HW struct
205 * @grps_req: number of groups to delete
206 * @buf: pointer to buffer
207 * @buf_size: buffer size in bytes
208 * @grps_del: returns total number of elements deleted
209 * @cd: pointer to command details structure or NULL
210 *
211 * Delete scheduling elements (0x040F)
212 */
213static enum ice_status
214ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
215 struct ice_aqc_delete_elem *buf, u16 buf_size,
216 u16 *grps_del, struct ice_sq_cd *cd)
217{
218 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
219 grps_req, (void *)buf, buf_size,
220 grps_del, cd);
221}
222
223/**
224 * ice_sched_remove_elems - remove nodes from HW
225 * @hw: pointer to the HW struct
226 * @parent: pointer to the parent node
227 * @num_nodes: number of nodes
228 * @node_teids: array of node teids to be deleted
229 *
230 * This function remove nodes from HW
231 */
232static enum ice_status
233ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
234 u16 num_nodes, u32 *node_teids)
235{
236 struct ice_aqc_delete_elem *buf;
237 u16 i, num_groups_removed = 0;
238 enum ice_status status;
239 u16 buf_size;
240
241 buf_size = struct_size(buf, teid, num_nodes);
242 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
243 if (!buf)
244 return ICE_ERR_NO_MEMORY;
245
246 buf->hdr.parent_teid = parent->info.node_teid;
247 buf->hdr.num_elems = cpu_to_le16(num_nodes);
248 for (i = 0; i < num_nodes; i++)
249 buf->teid[i] = cpu_to_le32(node_teids[i]);
250
251 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
252 &num_groups_removed, NULL);
253 if (status || num_groups_removed != 1)
254 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
255 hw->adminq.sq_last_status);
256
257 devm_kfree(ice_hw_to_dev(hw), buf);
258 return status;
259}
260
261/**
262 * ice_sched_get_first_node - get the first node of the given layer
263 * @pi: port information structure
264 * @parent: pointer the base node of the subtree
265 * @layer: layer number
266 *
267 * This function retrieves the first node of the given layer from the subtree
268 */
269static struct ice_sched_node *
270ice_sched_get_first_node(struct ice_port_info *pi,
271 struct ice_sched_node *parent, u8 layer)
272{
273 return pi->sib_head[parent->tc_num][layer];
274}
275
276/**
277 * ice_sched_get_tc_node - get pointer to TC node
278 * @pi: port information structure
279 * @tc: TC number
280 *
281 * This function returns the TC node pointer
282 */
283struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
284{
285 u8 i;
286
287 if (!pi || !pi->root)
288 return NULL;
289 for (i = 0; i < pi->root->num_children; i++)
290 if (pi->root->children[i]->tc_num == tc)
291 return pi->root->children[i];
292 return NULL;
293}
294
295/**
296 * ice_free_sched_node - Free a Tx scheduler node from SW DB
297 * @pi: port information structure
298 * @node: pointer to the ice_sched_node struct
299 *
300 * This function frees up a node from SW DB as well as from HW
301 *
302 * This function needs to be called with the port_info->sched_lock held
303 */
304void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
305{
306 struct ice_sched_node *parent;
307 struct ice_hw *hw = pi->hw;
308 u8 i, j;
309
310 /* Free the children before freeing up the parent node
311 * The parent array is updated below and that shifts the nodes
312 * in the array. So always pick the first child if num children > 0
313 */
314 while (node->num_children)
315 ice_free_sched_node(pi, node->children[0]);
316
317 /* Leaf, TC and root nodes can't be deleted by SW */
318 if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
319 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
320 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
321 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
322 u32 teid = le32_to_cpu(node->info.node_teid);
323
324 ice_sched_remove_elems(hw, node->parent, 1, &teid);
325 }
326 parent = node->parent;
327 /* root has no parent */
328 if (parent) {
329 struct ice_sched_node *p;
330
331 /* update the parent */
332 for (i = 0; i < parent->num_children; i++)
333 if (parent->children[i] == node) {
334 for (j = i + 1; j < parent->num_children; j++)
335 parent->children[j - 1] =
336 parent->children[j];
337 parent->num_children--;
338 break;
339 }
340
341 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
342 while (p) {
343 if (p->sibling == node) {
344 p->sibling = node->sibling;
345 break;
346 }
347 p = p->sibling;
348 }
349
350 /* update the sibling head if head is getting removed */
351 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
352 pi->sib_head[node->tc_num][node->tx_sched_layer] =
353 node->sibling;
354 }
355
356 /* leaf nodes have no children */
357 if (node->children)
358 devm_kfree(ice_hw_to_dev(hw), node->children);
359 devm_kfree(ice_hw_to_dev(hw), node);
360}
361
362/**
363 * ice_aq_get_dflt_topo - gets default scheduler topology
364 * @hw: pointer to the HW struct
365 * @lport: logical port number
366 * @buf: pointer to buffer
367 * @buf_size: buffer size in bytes
368 * @num_branches: returns total number of queue to port branches
369 * @cd: pointer to command details structure or NULL
370 *
371 * Get default scheduler topology (0x400)
372 */
373static enum ice_status
374ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
375 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
376 u8 *num_branches, struct ice_sq_cd *cd)
377{
378 struct ice_aqc_get_topo *cmd;
379 struct ice_aq_desc desc;
380 enum ice_status status;
381
382 cmd = &desc.params.get_topo;
383 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
384 cmd->port_num = lport;
385 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
386 if (!status && num_branches)
387 *num_branches = cmd->num_branches;
388
389 return status;
390}
391
392/**
393 * ice_aq_add_sched_elems - adds scheduling element
394 * @hw: pointer to the HW struct
395 * @grps_req: the number of groups that are requested to be added
396 * @buf: pointer to buffer
397 * @buf_size: buffer size in bytes
398 * @grps_added: returns total number of groups added
399 * @cd: pointer to command details structure or NULL
400 *
401 * Add scheduling elements (0x0401)
402 */
403static enum ice_status
404ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
405 struct ice_aqc_add_elem *buf, u16 buf_size,
406 u16 *grps_added, struct ice_sq_cd *cd)
407{
408 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
409 grps_req, (void *)buf, buf_size,
410 grps_added, cd);
411}
412
413/**
414 * ice_aq_cfg_sched_elems - configures scheduler elements
415 * @hw: pointer to the HW struct
416 * @elems_req: number of elements to configure
417 * @buf: pointer to buffer
418 * @buf_size: buffer size in bytes
419 * @elems_cfgd: returns total number of elements configured
420 * @cd: pointer to command details structure or NULL
421 *
422 * Configure scheduling elements (0x0403)
423 */
424static enum ice_status
425ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
426 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
427 u16 *elems_cfgd, struct ice_sq_cd *cd)
428{
429 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
430 elems_req, (void *)buf, buf_size,
431 elems_cfgd, cd);
432}
433
434/**
435 * ice_aq_suspend_sched_elems - suspend scheduler elements
436 * @hw: pointer to the HW struct
437 * @elems_req: number of elements to suspend
438 * @buf: pointer to buffer
439 * @buf_size: buffer size in bytes
440 * @elems_ret: returns total number of elements suspended
441 * @cd: pointer to command details structure or NULL
442 *
443 * Suspend scheduling elements (0x0409)
444 */
445static enum ice_status
446ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
447 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
448{
449 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
450 elems_req, (void *)buf, buf_size,
451 elems_ret, cd);
452}
453
454/**
455 * ice_aq_resume_sched_elems - resume scheduler elements
456 * @hw: pointer to the HW struct
457 * @elems_req: number of elements to resume
458 * @buf: pointer to buffer
459 * @buf_size: buffer size in bytes
460 * @elems_ret: returns total number of elements resumed
461 * @cd: pointer to command details structure or NULL
462 *
463 * resume scheduling elements (0x040A)
464 */
465static enum ice_status
466ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
467 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
468{
469 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
470 elems_req, (void *)buf, buf_size,
471 elems_ret, cd);
472}
473
474/**
475 * ice_aq_query_sched_res - query scheduler resource
476 * @hw: pointer to the HW struct
477 * @buf_size: buffer size in bytes
478 * @buf: pointer to buffer
479 * @cd: pointer to command details structure or NULL
480 *
481 * Query scheduler resource allocation (0x0412)
482 */
483static enum ice_status
484ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
485 struct ice_aqc_query_txsched_res_resp *buf,
486 struct ice_sq_cd *cd)
487{
488 struct ice_aq_desc desc;
489
490 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
491 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
492}
493
494/**
495 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
496 * @hw: pointer to the HW struct
497 * @num_nodes: number of nodes
498 * @node_teids: array of node teids to be suspended or resumed
499 * @suspend: true means suspend / false means resume
500 *
501 * This function suspends or resumes HW nodes
502 */
503static enum ice_status
504ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
505 bool suspend)
506{
507 u16 i, buf_size, num_elem_ret = 0;
508 enum ice_status status;
509 __le32 *buf;
510
511 buf_size = sizeof(*buf) * num_nodes;
512 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
513 if (!buf)
514 return ICE_ERR_NO_MEMORY;
515
516 for (i = 0; i < num_nodes; i++)
517 buf[i] = cpu_to_le32(node_teids[i]);
518
519 if (suspend)
520 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
521 buf_size, &num_elem_ret,
522 NULL);
523 else
524 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
525 buf_size, &num_elem_ret,
526 NULL);
527 if (status || num_elem_ret != num_nodes)
528 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
529
530 devm_kfree(ice_hw_to_dev(hw), buf);
531 return status;
532}
533
534/**
535 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
536 * @hw: pointer to the HW struct
537 * @vsi_handle: VSI handle
538 * @tc: TC number
539 * @new_numqs: number of queues
540 */
541static enum ice_status
542ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
543{
544 struct ice_vsi_ctx *vsi_ctx;
545 struct ice_q_ctx *q_ctx;
546
547 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
548 if (!vsi_ctx)
549 return ICE_ERR_PARAM;
550 /* allocate LAN queue contexts */
551 if (!vsi_ctx->lan_q_ctx[tc]) {
552 vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
553 new_numqs,
554 sizeof(*q_ctx),
555 GFP_KERNEL);
556 if (!vsi_ctx->lan_q_ctx[tc])
557 return ICE_ERR_NO_MEMORY;
558 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
559 return 0;
560 }
561 /* num queues are increased, update the queue contexts */
562 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
563 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
564
565 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
566 sizeof(*q_ctx), GFP_KERNEL);
567 if (!q_ctx)
568 return ICE_ERR_NO_MEMORY;
569 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
570 prev_num * sizeof(*q_ctx));
571 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
572 vsi_ctx->lan_q_ctx[tc] = q_ctx;
573 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
574 }
575 return 0;
576}
577
578/**
579 * ice_aq_rl_profile - performs a rate limiting task
580 * @hw: pointer to the HW struct
581 * @opcode: opcode for add, query, or remove profile(s)
582 * @num_profiles: the number of profiles
583 * @buf: pointer to buffer
584 * @buf_size: buffer size in bytes
585 * @num_processed: number of processed add or remove profile(s) to return
586 * @cd: pointer to command details structure
587 *
588 * RL profile function to add, query, or remove profile(s)
589 */
590static enum ice_status
591ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
592 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
593 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
594{
595 struct ice_aqc_rl_profile *cmd;
596 struct ice_aq_desc desc;
597 enum ice_status status;
598
599 cmd = &desc.params.rl_profile;
600
601 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
602 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
603 cmd->num_profiles = cpu_to_le16(num_profiles);
604 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
605 if (!status && num_processed)
606 *num_processed = le16_to_cpu(cmd->num_processed);
607 return status;
608}
609
610/**
611 * ice_aq_add_rl_profile - adds rate limiting profile(s)
612 * @hw: pointer to the HW struct
613 * @num_profiles: the number of profile(s) to be add
614 * @buf: pointer to buffer
615 * @buf_size: buffer size in bytes
616 * @num_profiles_added: total number of profiles added to return
617 * @cd: pointer to command details structure
618 *
619 * Add RL profile (0x0410)
620 */
621static enum ice_status
622ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
623 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
624 u16 *num_profiles_added, struct ice_sq_cd *cd)
625{
626 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
627 buf, buf_size, num_profiles_added, cd);
628}
629
630/**
631 * ice_aq_remove_rl_profile - removes RL profile(s)
632 * @hw: pointer to the HW struct
633 * @num_profiles: the number of profile(s) to remove
634 * @buf: pointer to buffer
635 * @buf_size: buffer size in bytes
636 * @num_profiles_removed: total number of profiles removed to return
637 * @cd: pointer to command details structure or NULL
638 *
639 * Remove RL profile (0x0415)
640 */
641static enum ice_status
642ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
643 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
644 u16 *num_profiles_removed, struct ice_sq_cd *cd)
645{
646 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
647 num_profiles, buf, buf_size,
648 num_profiles_removed, cd);
649}
650
651/**
652 * ice_sched_del_rl_profile - remove RL profile
653 * @hw: pointer to the HW struct
654 * @rl_info: rate limit profile information
655 *
656 * If the profile ID is not referenced anymore, it removes profile ID with
657 * its associated parameters from HW DB,and locally. The caller needs to
658 * hold scheduler lock.
659 */
660static enum ice_status
661ice_sched_del_rl_profile(struct ice_hw *hw,
662 struct ice_aqc_rl_profile_info *rl_info)
663{
664 struct ice_aqc_rl_profile_elem *buf;
665 u16 num_profiles_removed;
666 enum ice_status status;
667 u16 num_profiles = 1;
668
669 if (rl_info->prof_id_ref != 0)
670 return ICE_ERR_IN_USE;
671
672 /* Safe to remove profile ID */
673 buf = &rl_info->profile;
674 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
675 &num_profiles_removed, NULL);
676 if (status || num_profiles_removed != num_profiles)
677 return ICE_ERR_CFG;
678
679 /* Delete stale entry now */
680 list_del(&rl_info->list_entry);
681 devm_kfree(ice_hw_to_dev(hw), rl_info);
682 return status;
683}
684
685/**
686 * ice_sched_clear_rl_prof - clears RL prof entries
687 * @pi: port information structure
688 *
689 * This function removes all RL profile from HW as well as from SW DB.
690 */
691static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
692{
693 u16 ln;
694
695 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
696 struct ice_aqc_rl_profile_info *rl_prof_elem;
697 struct ice_aqc_rl_profile_info *rl_prof_tmp;
698
699 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
700 &pi->rl_prof_list[ln], list_entry) {
701 struct ice_hw *hw = pi->hw;
702 enum ice_status status;
703
704 rl_prof_elem->prof_id_ref = 0;
705 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
706 if (status) {
707 ice_debug(hw, ICE_DBG_SCHED,
708 "Remove rl profile failed\n");
709 /* On error, free mem required */
710 list_del(&rl_prof_elem->list_entry);
711 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
712 }
713 }
714 }
715}
716
717/**
718 * ice_sched_clear_agg - clears the aggregator related information
719 * @hw: pointer to the hardware structure
720 *
721 * This function removes aggregator list and free up aggregator related memory
722 * previously allocated.
723 */
724void ice_sched_clear_agg(struct ice_hw *hw)
725{
726 struct ice_sched_agg_info *agg_info;
727 struct ice_sched_agg_info *atmp;
728
729 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
730 struct ice_sched_agg_vsi_info *agg_vsi_info;
731 struct ice_sched_agg_vsi_info *vtmp;
732
733 list_for_each_entry_safe(agg_vsi_info, vtmp,
734 &agg_info->agg_vsi_list, list_entry) {
735 list_del(&agg_vsi_info->list_entry);
736 devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
737 }
738 list_del(&agg_info->list_entry);
739 devm_kfree(ice_hw_to_dev(hw), agg_info);
740 }
741}
742
743/**
744 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
745 * @pi: port information structure
746 *
747 * This function removes all the nodes from HW as well as from SW DB.
748 */
749static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
750{
751 if (!pi)
752 return;
753 /* remove RL profiles related lists */
754 ice_sched_clear_rl_prof(pi);
755 if (pi->root) {
756 ice_free_sched_node(pi, pi->root);
757 pi->root = NULL;
758 }
759}
760
761/**
762 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
763 * @pi: port information structure
764 *
765 * Cleanup scheduling elements from SW DB
766 */
767void ice_sched_clear_port(struct ice_port_info *pi)
768{
769 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
770 return;
771
772 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
773 mutex_lock(&pi->sched_lock);
774 ice_sched_clear_tx_topo(pi);
775 mutex_unlock(&pi->sched_lock);
776 mutex_destroy(&pi->sched_lock);
777}
778
779/**
780 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
781 * @hw: pointer to the HW struct
782 *
783 * Cleanup scheduling elements from SW DB for all the ports
784 */
785void ice_sched_cleanup_all(struct ice_hw *hw)
786{
787 if (!hw)
788 return;
789
790 if (hw->layer_info) {
791 devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
792 hw->layer_info = NULL;
793 }
794
795 ice_sched_clear_port(hw->port_info);
796
797 hw->num_tx_sched_layers = 0;
798 hw->num_tx_sched_phys_layers = 0;
799 hw->flattened_layers = 0;
800 hw->max_cgds = 0;
801}
802
803/**
804 * ice_sched_add_elems - add nodes to HW and SW DB
805 * @pi: port information structure
806 * @tc_node: pointer to the branch node
807 * @parent: pointer to the parent node
808 * @layer: layer number to add nodes
809 * @num_nodes: number of nodes
810 * @num_nodes_added: pointer to num nodes added
811 * @first_node_teid: if new nodes are added then return the TEID of first node
812 *
813 * This function add nodes to HW as well as to SW DB for a given layer
814 */
815static enum ice_status
816ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
817 struct ice_sched_node *parent, u8 layer, u16 num_nodes,
818 u16 *num_nodes_added, u32 *first_node_teid)
819{
820 struct ice_sched_node *prev, *new_node;
821 struct ice_aqc_add_elem *buf;
822 u16 i, num_groups_added = 0;
823 enum ice_status status = 0;
824 struct ice_hw *hw = pi->hw;
825 size_t buf_size;
826 u32 teid;
827
828 buf_size = struct_size(buf, generic, num_nodes);
829 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
830 if (!buf)
831 return ICE_ERR_NO_MEMORY;
832
833 buf->hdr.parent_teid = parent->info.node_teid;
834 buf->hdr.num_elems = cpu_to_le16(num_nodes);
835 for (i = 0; i < num_nodes; i++) {
836 buf->generic[i].parent_teid = parent->info.node_teid;
837 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
838 buf->generic[i].data.valid_sections =
839 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
840 ICE_AQC_ELEM_VALID_EIR;
841 buf->generic[i].data.generic = 0;
842 buf->generic[i].data.cir_bw.bw_profile_idx =
843 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
844 buf->generic[i].data.cir_bw.bw_alloc =
845 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
846 buf->generic[i].data.eir_bw.bw_profile_idx =
847 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
848 buf->generic[i].data.eir_bw.bw_alloc =
849 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
850 }
851
852 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
853 &num_groups_added, NULL);
854 if (status || num_groups_added != 1) {
855 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
856 hw->adminq.sq_last_status);
857 devm_kfree(ice_hw_to_dev(hw), buf);
858 return ICE_ERR_CFG;
859 }
860
861 *num_nodes_added = num_nodes;
862 /* add nodes to the SW DB */
863 for (i = 0; i < num_nodes; i++) {
864 status = ice_sched_add_node(pi, layer, &buf->generic[i]);
865 if (status) {
866 ice_debug(hw, ICE_DBG_SCHED,
867 "add nodes in SW DB failed status =%d\n",
868 status);
869 break;
870 }
871
872 teid = le32_to_cpu(buf->generic[i].node_teid);
873 new_node = ice_sched_find_node_by_teid(parent, teid);
874 if (!new_node) {
875 ice_debug(hw, ICE_DBG_SCHED,
876 "Node is missing for teid =%d\n", teid);
877 break;
878 }
879
880 new_node->sibling = NULL;
881 new_node->tc_num = tc_node->tc_num;
882
883 /* add it to previous node sibling pointer */
884 /* Note: siblings are not linked across branches */
885 prev = ice_sched_get_first_node(pi, tc_node, layer);
886 if (prev && prev != new_node) {
887 while (prev->sibling)
888 prev = prev->sibling;
889 prev->sibling = new_node;
890 }
891
892 /* initialize the sibling head */
893 if (!pi->sib_head[tc_node->tc_num][layer])
894 pi->sib_head[tc_node->tc_num][layer] = new_node;
895
896 if (i == 0)
897 *first_node_teid = teid;
898 }
899
900 devm_kfree(ice_hw_to_dev(hw), buf);
901 return status;
902}
903
904/**
905 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
906 * @pi: port information structure
907 * @tc_node: pointer to TC node
908 * @parent: pointer to parent node
909 * @layer: layer number to add nodes
910 * @num_nodes: number of nodes to be added
911 * @first_node_teid: pointer to the first node TEID
912 * @num_nodes_added: pointer to number of nodes added
913 *
914 * This function add nodes to a given layer.
915 */
916static enum ice_status
917ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
918 struct ice_sched_node *tc_node,
919 struct ice_sched_node *parent, u8 layer,
920 u16 num_nodes, u32 *first_node_teid,
921 u16 *num_nodes_added)
922{
923 u32 *first_teid_ptr = first_node_teid;
924 u16 new_num_nodes, max_child_nodes;
925 enum ice_status status = 0;
926 struct ice_hw *hw = pi->hw;
927 u16 num_added = 0;
928 u32 temp;
929
930 *num_nodes_added = 0;
931
932 if (!num_nodes)
933 return status;
934
935 if (!parent || layer < hw->sw_entry_point_layer)
936 return ICE_ERR_PARAM;
937
938 /* max children per node per layer */
939 max_child_nodes = hw->max_children[parent->tx_sched_layer];
940
941 /* current number of children + required nodes exceed max children ? */
942 if ((parent->num_children + num_nodes) > max_child_nodes) {
943 /* Fail if the parent is a TC node */
944 if (parent == tc_node)
945 return ICE_ERR_CFG;
946
947 /* utilize all the spaces if the parent is not full */
948 if (parent->num_children < max_child_nodes) {
949 new_num_nodes = max_child_nodes - parent->num_children;
950 /* this recursion is intentional, and wouldn't
951 * go more than 2 calls
952 */
953 status = ice_sched_add_nodes_to_layer(pi, tc_node,
954 parent, layer,
955 new_num_nodes,
956 first_node_teid,
957 &num_added);
958 if (status)
959 return status;
960
961 *num_nodes_added += num_added;
962 }
963 /* Don't modify the first node TEID memory if the first node was
964 * added already in the above call. Instead send some temp
965 * memory for all other recursive calls.
966 */
967 if (num_added)
968 first_teid_ptr = &temp;
969
970 new_num_nodes = num_nodes - num_added;
971
972 /* This parent is full, try the next sibling */
973 parent = parent->sibling;
974
975 /* this recursion is intentional, for 1024 queues
976 * per VSI, it goes max of 16 iterations.
977 * 1024 / 8 = 128 layer 8 nodes
978 * 128 /8 = 16 (add 8 nodes per iteration)
979 */
980 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
981 layer, new_num_nodes,
982 first_teid_ptr,
983 &num_added);
984 *num_nodes_added += num_added;
985 return status;
986 }
987
988 status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
989 num_nodes_added, first_node_teid);
990 return status;
991}
992
993/**
994 * ice_sched_get_qgrp_layer - get the current queue group layer number
995 * @hw: pointer to the HW struct
996 *
997 * This function returns the current queue group layer number
998 */
999static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1000{
1001 /* It's always total layers - 1, the array is 0 relative so -2 */
1002 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1003}
1004
1005/**
1006 * ice_sched_get_vsi_layer - get the current VSI layer number
1007 * @hw: pointer to the HW struct
1008 *
1009 * This function returns the current VSI layer number
1010 */
1011static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1012{
1013 /* Num Layers VSI layer
1014 * 9 6
1015 * 7 4
1016 * 5 or less sw_entry_point_layer
1017 */
1018 /* calculate the VSI layer based on number of layers. */
1019 if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
1020 u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1021
1022 if (layer > hw->sw_entry_point_layer)
1023 return layer;
1024 }
1025 return hw->sw_entry_point_layer;
1026}
1027
1028/**
1029 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1030 * @pi: port information structure
1031 *
1032 * This function removes the leaf node that was created by the FW
1033 * during initialization
1034 */
1035static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1036{
1037 struct ice_sched_node *node;
1038
1039 node = pi->root;
1040 while (node) {
1041 if (!node->num_children)
1042 break;
1043 node = node->children[0];
1044 }
1045 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1046 u32 teid = le32_to_cpu(node->info.node_teid);
1047 enum ice_status status;
1048
1049 /* remove the default leaf node */
1050 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1051 if (!status)
1052 ice_free_sched_node(pi, node);
1053 }
1054}
1055
1056/**
1057 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1058 * @pi: port information structure
1059 *
1060 * This function frees all the nodes except root and TC that were created by
1061 * the FW during initialization
1062 */
1063static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1064{
1065 struct ice_sched_node *node;
1066
1067 ice_rm_dflt_leaf_node(pi);
1068
1069 /* remove the default nodes except TC and root nodes */
1070 node = pi->root;
1071 while (node) {
1072 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1073 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1074 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1075 ice_free_sched_node(pi, node);
1076 break;
1077 }
1078
1079 if (!node->num_children)
1080 break;
1081 node = node->children[0];
1082 }
1083}
1084
1085/**
1086 * ice_sched_init_port - Initialize scheduler by querying information from FW
1087 * @pi: port info structure for the tree to cleanup
1088 *
1089 * This function is the initial call to find the total number of Tx scheduler
1090 * resources, default topology created by firmware and storing the information
1091 * in SW DB.
1092 */
1093enum ice_status ice_sched_init_port(struct ice_port_info *pi)
1094{
1095 struct ice_aqc_get_topo_elem *buf;
1096 enum ice_status status;
1097 struct ice_hw *hw;
1098 u8 num_branches;
1099 u16 num_elems;
1100 u8 i, j;
1101
1102 if (!pi)
1103 return ICE_ERR_PARAM;
1104 hw = pi->hw;
1105
1106 /* Query the Default Topology from FW */
1107 buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
1108 if (!buf)
1109 return ICE_ERR_NO_MEMORY;
1110
1111 /* Query default scheduling tree topology */
1112 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1113 &num_branches, NULL);
1114 if (status)
1115 goto err_init_port;
1116
1117 /* num_branches should be between 1-8 */
1118 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1119 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1120 num_branches);
1121 status = ICE_ERR_PARAM;
1122 goto err_init_port;
1123 }
1124
1125 /* get the number of elements on the default/first branch */
1126 num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1127
1128 /* num_elems should always be between 1-9 */
1129 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1130 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1131 num_elems);
1132 status = ICE_ERR_PARAM;
1133 goto err_init_port;
1134 }
1135
1136 /* If the last node is a leaf node then the index of the queue group
1137 * layer is two less than the number of elements.
1138 */
1139 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1140 ICE_AQC_ELEM_TYPE_LEAF)
1141 pi->last_node_teid =
1142 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1143 else
1144 pi->last_node_teid =
1145 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1146
1147 /* Insert the Tx Sched root node */
1148 status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1149 if (status)
1150 goto err_init_port;
1151
1152 /* Parse the default tree and cache the information */
1153 for (i = 0; i < num_branches; i++) {
1154 num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1155
1156 /* Skip root element as already inserted */
1157 for (j = 1; j < num_elems; j++) {
1158 /* update the sw entry point */
1159 if (buf[0].generic[j].data.elem_type ==
1160 ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1161 hw->sw_entry_point_layer = j;
1162
1163 status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1164 if (status)
1165 goto err_init_port;
1166 }
1167 }
1168
1169 /* Remove the default nodes. */
1170 if (pi->root)
1171 ice_sched_rm_dflt_nodes(pi);
1172
1173 /* initialize the port for handling the scheduler tree */
1174 pi->port_state = ICE_SCHED_PORT_STATE_READY;
1175 mutex_init(&pi->sched_lock);
1176 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1177 INIT_LIST_HEAD(&pi->rl_prof_list[i]);
1178
1179err_init_port:
1180 if (status && pi->root) {
1181 ice_free_sched_node(pi, pi->root);
1182 pi->root = NULL;
1183 }
1184
1185 devm_kfree(ice_hw_to_dev(hw), buf);
1186 return status;
1187}
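
/*
 * Illustrative sketch (not part of the driver) of a default tree branch as
 * parsed above, from root to leaf:
 *
 *	root (layer 0)
 *	   TC node (layer 1)
 *	      ... intermediate default nodes ...
 *	         queue group node
 *	            leaf/queue node
 *
 * Once the branch is cached, ice_sched_rm_dflt_nodes() strips the
 * FW-created leaf and intermediate nodes, leaving only the root and TC
 * nodes for SW to build on.
 */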
1188
1189/**
1190 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1191 * @hw: pointer to the HW struct
1192 *
1193 * query FW for allocated scheduler resources and store in HW struct
1194 */
1195enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1196{
1197 struct ice_aqc_query_txsched_res_resp *buf;
1198 enum ice_status status = 0;
1199 __le16 max_sibl;
1200 u16 i;
1201
1202 if (hw->layer_info)
1203 return status;
1204
1205 buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1206 if (!buf)
1207 return ICE_ERR_NO_MEMORY;
1208
1209 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1210 if (status)
1211 goto sched_query_out;
1212
1213 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1214 hw->num_tx_sched_phys_layers =
1215 le16_to_cpu(buf->sched_props.phys_levels);
1216 hw->flattened_layers = buf->sched_props.flattening_bitmap;
1217 hw->max_cgds = buf->sched_props.max_pf_cgds;
1218
 /* The max sibling group size of a given layer is the max number of
 * children of a node in the layer above it:
 * layer 1 nodes' max children = layer 2's max sibling group size,
 * layer 2 nodes' max children = layer 3's max sibling group size,
 * and so on. This array is populated from the root (index 0) down to
 * the qgroup layer. Leaf nodes have no children.
 */
1226 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1227 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1228 hw->max_children[i] = le16_to_cpu(max_sibl);
1229 }
1230
1231 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1232 (hw->num_tx_sched_layers *
1233 sizeof(*hw->layer_info)),
1234 GFP_KERNEL);
1235 if (!hw->layer_info) {
1236 status = ICE_ERR_NO_MEMORY;
1237 goto sched_query_out;
1238 }
1239
1240sched_query_out:
1241 devm_kfree(ice_hw_to_dev(hw), buf);
1242 return status;
1243}
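
/*
 * Example of the max_children indexing above (hypothetical numbers): if
 * the FW reports 9 logical levels and layer_props[1].max_sibl_grp_sz is 8,
 * then hw->max_children[0] = 8, i.e. the root (layer 0) may have at most 8
 * children. In general, hw->max_children[i] limits how many layer i + 1
 * nodes may share a single layer i parent.
 */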
1244
1245/**
1246 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1247 * @hw: pointer to the HW struct
1248 * @base: pointer to the base node
1249 * @node: pointer to the node to search
1250 *
1251 * This function checks whether a given node is part of the base node
1252 * subtree or not
1253 */
1254static bool
1255ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1256 struct ice_sched_node *node)
1257{
1258 u8 i;
1259
1260 for (i = 0; i < base->num_children; i++) {
1261 struct ice_sched_node *child = base->children[i];
1262
1263 if (node == child)
1264 return true;
1265
1266 if (child->tx_sched_layer > node->tx_sched_layer)
1267 return false;
1268
 /* this recursion is intentional; it won't go more
 * than 8 calls deep
 */
1272 if (ice_sched_find_node_in_subtree(hw, child, node))
1273 return true;
1274 }
1275 return false;
1276}
1277
1278/**
1279 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
1280 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
1282 * @qgrp_node: first queue group node identified for scanning
1283 * @owner: LAN or RDMA
1284 *
1285 * This function retrieves a free LAN or RDMA queue group node by scanning
1286 * qgrp_node and its siblings for the queue group with the fewest number
1287 * of queues currently assigned.
1288 */
1289static struct ice_sched_node *
1290ice_sched_get_free_qgrp(struct ice_port_info *pi,
1291 struct ice_sched_node *vsi_node,
1292 struct ice_sched_node *qgrp_node, u8 owner)
1293{
1294 struct ice_sched_node *min_qgrp;
1295 u8 min_children;
1296
1297 if (!qgrp_node)
1298 return qgrp_node;
1299 min_children = qgrp_node->num_children;
1300 if (!min_children)
1301 return qgrp_node;
1302 min_qgrp = qgrp_node;
 /* scan all queue groups until we find a node that has fewer than the
 * current minimum number of children. This way all queue group nodes
 * get an equal share of queues and stay active, so the bandwidth is
 * distributed equally across all queues.
 */
1308 while (qgrp_node) {
1309 /* make sure the qgroup node is part of the VSI subtree */
1310 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1311 if (qgrp_node->num_children < min_children &&
1312 qgrp_node->owner == owner) {
1313 /* replace the new min queue group node */
1314 min_qgrp = qgrp_node;
1315 min_children = min_qgrp->num_children;
 /* break if it has no children */
1317 if (!min_children)
1318 break;
1319 }
1320 qgrp_node = qgrp_node->sibling;
1321 }
1322 return min_qgrp;
1323}
1324
1325/**
1326 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1327 * @pi: port information structure
1328 * @vsi_handle: software VSI handle
1329 * @tc: branch number
1330 * @owner: LAN or RDMA
1331 *
1332 * This function retrieves a free LAN or RDMA queue group node
1333 */
1334struct ice_sched_node *
1335ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1336 u8 owner)
1337{
1338 struct ice_sched_node *vsi_node, *qgrp_node;
1339 struct ice_vsi_ctx *vsi_ctx;
1340 u16 max_children;
1341 u8 qgrp_layer;
1342
1343 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1344 max_children = pi->hw->max_children[qgrp_layer];
1345
1346 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1347 if (!vsi_ctx)
1348 return NULL;
1349 vsi_node = vsi_ctx->sched.vsi_node[tc];
 /* bail out if no VSI node exists for this handle on this TC */
1351 if (!vsi_node)
1352 return NULL;
1353
1354 /* get the first queue group node from VSI sub-tree */
1355 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1356 while (qgrp_node) {
1357 /* make sure the qgroup node is part of the VSI subtree */
1358 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1359 if (qgrp_node->num_children < max_children &&
1360 qgrp_node->owner == owner)
1361 break;
1362 qgrp_node = qgrp_node->sibling;
1363 }
1364
1365 /* Select the best queue group */
1366 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
1367}
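
/*
 * Usage sketch (illustrative; vsi_handle and tc are hypothetical values):
 * callers typically look up a free queue group parent before attaching a
 * new LAN queue to the scheduler tree:
 *
 *	struct ice_sched_node *parent;
 *
 *	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
 *					    ICE_SCHED_NODE_OWNER_LAN);
 *	if (!parent)
 *		return ICE_ERR_CFG;
 *
 * A NULL return means no queue group with spare capacity was found for
 * the given owner.
 */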
1368
1369/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI handle
1371 * @hw: pointer to the HW struct
1372 * @tc_node: pointer to the TC node
1373 * @vsi_handle: software VSI handle
1374 *
 * This function retrieves a VSI node for a given VSI handle from a given
 * TC branch
1377 */
1378static struct ice_sched_node *
1379ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
1380 u16 vsi_handle)
1381{
1382 struct ice_sched_node *node;
1383 u8 vsi_layer;
1384
1385 vsi_layer = ice_sched_get_vsi_layer(hw);
1386 node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
1387
1388 /* Check whether it already exists */
1389 while (node) {
1390 if (node->vsi_handle == vsi_handle)
1391 return node;
1392 node = node->sibling;
1393 }
1394
1395 return node;
1396}
1397
1398/**
1399 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1400 * @hw: pointer to the HW struct
1401 * @num_qs: number of queues
1402 * @num_nodes: num nodes array
1403 *
1404 * This function calculates the number of VSI child nodes based on the
1405 * number of queues.
1406 */
1407static void
1408ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1409{
1410 u16 num = num_qs;
1411 u8 i, qgl, vsil;
1412
1413 qgl = ice_sched_get_qgrp_layer(hw);
1414 vsil = ice_sched_get_vsi_layer(hw);
1415
1416 /* calculate num nodes from queue group to VSI layer */
1417 for (i = qgl; i > vsil; i--) {
1418 /* round to the next integer if there is a remainder */
1419 num = DIV_ROUND_UP(num, hw->max_children[i]);
1420
1421 /* need at least one node */
1422 num_nodes[i] = num ? num : 1;
1423 }
1424}
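
/*
 * Worked example (hypothetical numbers): with num_qs = 130, a queue group
 * layer of 8, a VSI layer of 6 and max_children of 8 at each layer, the
 * loop above computes:
 *
 *	num_nodes[8] = DIV_ROUND_UP(130, 8) = 17 queue group nodes
 *	num_nodes[7] = DIV_ROUND_UP(17, 8) = 3 intermediate nodes
 *
 * i.e. 17 queue groups hang off 3 layer 7 nodes under the VSI node.
 */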
1425
1426/**
1427 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1428 * @pi: port information structure
1429 * @vsi_handle: software VSI handle
1430 * @tc_node: pointer to the TC node
1431 * @num_nodes: pointer to the num nodes that needs to be added per layer
1432 * @owner: node owner (LAN or RDMA)
1433 *
1434 * This function adds the VSI child nodes to tree. It gets called for
1435 * LAN and RDMA separately.
1436 */
1437static enum ice_status
1438ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1439 struct ice_sched_node *tc_node, u16 *num_nodes,
1440 u8 owner)
1441{
1442 struct ice_sched_node *parent, *node;
1443 struct ice_hw *hw = pi->hw;
1444 enum ice_status status;
1445 u32 first_node_teid;
1446 u16 num_added = 0;
1447 u8 i, qgl, vsil;
1448
1449 qgl = ice_sched_get_qgrp_layer(hw);
1450 vsil = ice_sched_get_vsi_layer(hw);
1451 parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1452 for (i = vsil + 1; i <= qgl; i++) {
1453 if (!parent)
1454 return ICE_ERR_CFG;
1455
1456 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1457 num_nodes[i],
1458 &first_node_teid,
1459 &num_added);
1460 if (status || num_nodes[i] != num_added)
1461 return ICE_ERR_CFG;
1462
1463 /* The newly added node can be a new parent for the next
1464 * layer nodes
1465 */
1466 if (num_added) {
1467 parent = ice_sched_find_node_by_teid(tc_node,
1468 first_node_teid);
1469 node = parent;
1470 while (node) {
1471 node->owner = owner;
1472 node = node->sibling;
1473 }
1474 } else {
1475 parent = parent->children[0];
1476 }
1477 }
1478
1479 return 0;
1480}
1481
1482/**
1483 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1484 * @hw: pointer to the HW struct
1485 * @tc_node: pointer to TC node
1486 * @num_nodes: pointer to num nodes array
1487 *
1488 * This function calculates the number of supported nodes needed to add this
1489 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
1490 * layers
1491 */
1492static void
1493ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
1494 struct ice_sched_node *tc_node, u16 *num_nodes)
1495{
1496 struct ice_sched_node *node;
1497 u8 vsil;
1498 int i;
1499
1500 vsil = ice_sched_get_vsi_layer(hw);
1501 for (i = vsil; i >= hw->sw_entry_point_layer; i--)
 /* Add intermediate nodes if the TC has no children; the VSI
 * layer always needs at least one node
 */
1505 if (!tc_node->num_children || i == vsil) {
1506 num_nodes[i]++;
1507 } else {
 /* If the intermediate nodes have reached their max
 * children, then add a new one.
 */
1511 node = ice_sched_get_first_node(hw->port_info, tc_node,
1512 (u8)i);
1513 /* scan all the siblings */
1514 while (node) {
1515 if (node->num_children < hw->max_children[i])
1516 break;
1517 node = node->sibling;
1518 }
1519
 /* the tree has an intermediate node with room for this
 * new VSI, so there is no need to calculate supported
 * nodes for the layers below.
 */
1524 if (node)
1525 break;
1526 /* all the nodes are full, allocate a new one */
1527 num_nodes[i]++;
1528 }
1529}
1530
1531/**
1532 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1533 * @pi: port information structure
1534 * @vsi_handle: software VSI handle
1535 * @tc_node: pointer to TC node
1536 * @num_nodes: pointer to num nodes array
1537 *
1538 * This function adds the VSI supported nodes into Tx tree including the
1539 * VSI, its parent and intermediate nodes in below layers
1540 */
1541static enum ice_status
1542ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1543 struct ice_sched_node *tc_node, u16 *num_nodes)
1544{
1545 struct ice_sched_node *parent = tc_node;
1546 enum ice_status status;
1547 u32 first_node_teid;
1548 u16 num_added = 0;
1549 u8 i, vsil;
1550
1551 if (!pi)
1552 return ICE_ERR_PARAM;
1553
1554 vsil = ice_sched_get_vsi_layer(pi->hw);
1555 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1556 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1557 i, num_nodes[i],
1558 &first_node_teid,
1559 &num_added);
1560 if (status || num_nodes[i] != num_added)
1561 return ICE_ERR_CFG;
1562
1563 /* The newly added node can be a new parent for the next
1564 * layer nodes
1565 */
1566 if (num_added)
1567 parent = ice_sched_find_node_by_teid(tc_node,
1568 first_node_teid);
1569 else
1570 parent = parent->children[0];
1571
1572 if (!parent)
1573 return ICE_ERR_CFG;
1574
1575 if (i == vsil)
1576 parent->vsi_handle = vsi_handle;
1577 }
1578
1579 return 0;
1580}
1581
1582/**
1583 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1584 * @pi: port information structure
1585 * @vsi_handle: software VSI handle
1586 * @tc: TC number
1587 *
1588 * This function adds a new VSI into scheduler tree
1589 */
1590static enum ice_status
1591ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1592{
1593 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1594 struct ice_sched_node *tc_node;
1595 struct ice_hw *hw = pi->hw;
1596
1597 tc_node = ice_sched_get_tc_node(pi, tc);
1598 if (!tc_node)
1599 return ICE_ERR_PARAM;
1600
1601 /* calculate number of supported nodes needed for this VSI */
1602 ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
1603
1604 /* add VSI supported nodes to TC subtree */
1605 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1606 num_nodes);
1607}
1608
1609/**
1610 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1611 * @pi: port information structure
1612 * @vsi_handle: software VSI handle
1613 * @tc: TC number
1614 * @new_numqs: new number of max queues
1615 * @owner: owner of this subtree
1616 *
1617 * This function updates the VSI child nodes based on the number of queues
1618 */
1619static enum ice_status
1620ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1621 u8 tc, u16 new_numqs, u8 owner)
1622{
1623 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1624 struct ice_sched_node *vsi_node;
1625 struct ice_sched_node *tc_node;
1626 struct ice_vsi_ctx *vsi_ctx;
1627 enum ice_status status = 0;
1628 struct ice_hw *hw = pi->hw;
1629 u16 prev_numqs;
1630
1631 tc_node = ice_sched_get_tc_node(pi, tc);
1632 if (!tc_node)
1633 return ICE_ERR_CFG;
1634
1635 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1636 if (!vsi_node)
1637 return ICE_ERR_CFG;
1638
1639 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1640 if (!vsi_ctx)
1641 return ICE_ERR_PARAM;
1642
1643 prev_numqs = vsi_ctx->sched.max_lanq[tc];
 /* the number of queues is unchanged or lower than before */
1645 if (new_numqs <= prev_numqs)
1646 return status;
1647 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1648 if (status)
1649 return status;
1650
1651 if (new_numqs)
1652 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1653 /* Keep the max number of queue configuration all the time. Update the
1654 * tree only if number of queues > previous number of queues. This may
1655 * leave some extra nodes in the tree if number of queues < previous
1656 * number but that wouldn't harm anything. Removing those extra nodes
1657 * may complicate the code if those nodes are part of SRL or
1658 * individually rate limited.
1659 */
1660 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1661 new_num_nodes, owner);
1662 if (status)
1663 return status;
1664 vsi_ctx->sched.max_lanq[tc] = new_numqs;
1665
1666 return 0;
1667}
1668
1669/**
1670 * ice_sched_cfg_vsi - configure the new/existing VSI
1671 * @pi: port information structure
1672 * @vsi_handle: software VSI handle
1673 * @tc: TC number
1674 * @maxqs: max number of queues
1675 * @owner: LAN or RDMA
1676 * @enable: TC enabled or disabled
1677 *
1678 * This function adds/updates VSI nodes based on the number of queues. If TC is
1679 * enabled and VSI is in suspended state then resume the VSI back. If TC is
1680 * disabled then suspend the VSI if it is not already.
1681 */
1682enum ice_status
1683ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1684 u8 owner, bool enable)
1685{
1686 struct ice_sched_node *vsi_node, *tc_node;
1687 struct ice_vsi_ctx *vsi_ctx;
1688 enum ice_status status = 0;
1689 struct ice_hw *hw = pi->hw;
1690
1691 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1692 tc_node = ice_sched_get_tc_node(pi, tc);
1693 if (!tc_node)
1694 return ICE_ERR_PARAM;
1695 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1696 if (!vsi_ctx)
1697 return ICE_ERR_PARAM;
1698 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1699
1700 /* suspend the VSI if TC is not enabled */
1701 if (!enable) {
1702 if (vsi_node && vsi_node->in_use) {
1703 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1704
1705 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1706 true);
1707 if (!status)
1708 vsi_node->in_use = false;
1709 }
1710 return status;
1711 }
1712
1713 /* TC is enabled, if it is a new VSI then add it to the tree */
1714 if (!vsi_node) {
1715 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1716 if (status)
1717 return status;
1718
1719 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1720 if (!vsi_node)
1721 return ICE_ERR_CFG;
1722
1723 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1724 vsi_node->in_use = true;
 /* invalidate the max queues whenever the VSI is added to the
 * scheduler tree for the first time (boot or after reset);
 * the child nodes must be recreated in these cases.
 */
1729 vsi_ctx->sched.max_lanq[tc] = 0;
1730 }
1731
1732 /* update the VSI child nodes */
1733 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1734 owner);
1735 if (status)
1736 return status;
1737
1738 /* TC is enabled, resume the VSI if it is in the suspend state */
1739 if (!vsi_node->in_use) {
1740 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1741
1742 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1743 if (!status)
1744 vsi_node->in_use = true;
1745 }
1746
1747 return status;
1748}
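
/*
 * Usage sketch (illustrative; the handle and queue count are
 * hypothetical): enabling TC 0 for a VSI with up to 64 LAN queues, then
 * disabling the TC again, which suspends the VSI node rather than
 * removing it:
 *
 *	status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 64,
 *				   ICE_SCHED_NODE_OWNER_LAN, true);
 *	...
 *	status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 0,
 *				   ICE_SCHED_NODE_OWNER_LAN, false);
 */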
1749
1750/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes a single aggregator VSI info entry from the
 * aggregator list.
1757 */
1758static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1759{
1760 struct ice_sched_agg_info *agg_info;
1761 struct ice_sched_agg_info *atmp;
1762
1763 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1764 list_entry) {
1765 struct ice_sched_agg_vsi_info *agg_vsi_info;
1766 struct ice_sched_agg_vsi_info *vtmp;
1767
1768 list_for_each_entry_safe(agg_vsi_info, vtmp,
1769 &agg_info->agg_vsi_list, list_entry)
1770 if (agg_vsi_info->vsi_handle == vsi_handle) {
1771 list_del(&agg_vsi_info->list_entry);
1772 devm_kfree(ice_hw_to_dev(pi->hw),
1773 agg_vsi_info);
1774 return;
1775 }
1776 }
1777}
1778
1779/**
1780 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1781 * @node: pointer to the sub-tree node
1782 *
1783 * This function checks for a leaf node presence in a given sub-tree node.
1784 */
1785static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1786{
1787 u8 i;
1788
1789 for (i = 0; i < node->num_children; i++)
1790 if (ice_sched_is_leaf_node_present(node->children[i]))
1791 return true;
1792 /* check for a leaf node */
1793 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1794}
1795
1796/**
1797 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1798 * @pi: port information structure
1799 * @vsi_handle: software VSI handle
1800 * @owner: LAN or RDMA
1801 *
1802 * This function removes the VSI and its LAN or RDMA children nodes from the
1803 * scheduler tree.
1804 */
1805static enum ice_status
1806ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1807{
1808 enum ice_status status = ICE_ERR_PARAM;
1809 struct ice_vsi_ctx *vsi_ctx;
1810 u8 i;
1811
1812 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
1813 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
1814 return status;
1815 mutex_lock(&pi->sched_lock);
1816 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1817 if (!vsi_ctx)
1818 goto exit_sched_rm_vsi_cfg;
1819
1820 ice_for_each_traffic_class(i) {
1821 struct ice_sched_node *vsi_node, *tc_node;
1822 u8 j = 0;
1823
1824 tc_node = ice_sched_get_tc_node(pi, i);
1825 if (!tc_node)
1826 continue;
1827
1828 vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
1829 if (!vsi_node)
1830 continue;
1831
1832 if (ice_sched_is_leaf_node_present(vsi_node)) {
1833 ice_debug(pi->hw, ICE_DBG_SCHED,
1834 "VSI has leaf nodes in TC %d\n", i);
1835 status = ICE_ERR_IN_USE;
1836 goto exit_sched_rm_vsi_cfg;
1837 }
1838 while (j < vsi_node->num_children) {
1839 if (vsi_node->children[j]->owner == owner) {
1840 ice_free_sched_node(pi, vsi_node->children[j]);
1841
 /* restart the scan since the number of
 * children is updated after node removal
 */
1845 j = 0;
1846 } else {
1847 j++;
1848 }
1849 }
1850 /* remove the VSI if it has no children */
1851 if (!vsi_node->num_children) {
1852 ice_free_sched_node(pi, vsi_node);
1853 vsi_ctx->sched.vsi_node[i] = NULL;
1854
1855 /* clean up aggregator related VSI info if any */
1856 ice_sched_rm_agg_vsi_info(pi, vsi_handle);
1857 }
1858 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1859 vsi_ctx->sched.max_lanq[i] = 0;
1860 }
1861 status = 0;
1862
1863exit_sched_rm_vsi_cfg:
1864 mutex_unlock(&pi->sched_lock);
1865 return status;
1866}
1867
1868/**
1869 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
1870 * @pi: port information structure
1871 * @vsi_handle: software VSI handle
1872 *
1873 * This function clears the VSI and its LAN children nodes from scheduler tree
1874 * for all TCs.
1875 */
1876enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
1877{
1878 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
1879}
1880
1881/**
1882 * ice_sched_rm_unused_rl_prof - remove unused RL profile
1883 * @pi: port information structure
1884 *
1885 * This function removes unused rate limit profiles from the HW and
1886 * SW DB. The caller needs to hold scheduler lock.
1887 */
1888static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
1889{
1890 u16 ln;
1891
1892 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
1893 struct ice_aqc_rl_profile_info *rl_prof_elem;
1894 struct ice_aqc_rl_profile_info *rl_prof_tmp;
1895
1896 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
1897 &pi->rl_prof_list[ln], list_entry) {
1898 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
1899 ice_debug(pi->hw, ICE_DBG_SCHED,
1900 "Removed rl profile\n");
1901 }
1902 }
1903}
1904
1905/**
1906 * ice_sched_update_elem - update element
1907 * @hw: pointer to the HW struct
1908 * @node: pointer to node
1909 * @info: node info to update
1910 *
 * Update the node's HW DB and local SW DB. The scheduling parameters are
 * taken from the info argument's data buffer (info->data) and an error is
 * returned if the config sched element command fails. The caller needs to
 * hold the scheduler lock.
1915 */
1916static enum ice_status
1917ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
1918 struct ice_aqc_txsched_elem_data *info)
1919{
1920 struct ice_aqc_txsched_elem_data buf;
1921 enum ice_status status;
1922 u16 elem_cfgd = 0;
1923 u16 num_elems = 1;
1924
1925 buf = *info;
 /* The parent TEID field is reserved in this AQ call */
 buf.parent_teid = 0;
 /* The element type field is reserved in this AQ call */
 buf.data.elem_type = 0;
 /* The flags field is reserved in this AQ call */
 buf.data.flags = 0;
1932
1933 /* Update HW DB */
1934 /* Configure element node */
1935 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
1936 &elem_cfgd, NULL);
1937 if (status || elem_cfgd != num_elems) {
1938 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
1939 return ICE_ERR_CFG;
1940 }
1941
1942 /* Config success case */
1943 /* Now update local SW DB */
1944 /* Only copy the data portion of info buffer */
1945 node->info.data = info->data;
1946 return status;
1947}
1948
1949/**
1950 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
1951 * @hw: pointer to the HW struct
1952 * @node: sched node to configure
1953 * @rl_type: rate limit type CIR, EIR, or shared
1954 * @bw_alloc: BW weight/allocation
1955 *
1956 * This function configures node element's BW allocation.
1957 */
1958static enum ice_status
1959ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
1960 enum ice_rl_type rl_type, u16 bw_alloc)
1961{
1962 struct ice_aqc_txsched_elem_data buf;
1963 struct ice_aqc_txsched_elem *data;
1964 enum ice_status status;
1965
1966 buf = node->info;
1967 data = &buf.data;
1968 if (rl_type == ICE_MIN_BW) {
1969 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
1970 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1971 } else if (rl_type == ICE_MAX_BW) {
1972 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
1973 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1974 } else {
1975 return ICE_ERR_PARAM;
1976 }
1977
1978 /* Configure element */
1979 status = ice_sched_update_elem(hw, node, &buf);
1980 return status;
1981}
1982
1983/**
1984 * ice_set_clear_cir_bw - set or clear CIR BW
1985 * @bw_t_info: bandwidth type information structure
1986 * @bw: bandwidth in Kbps - Kilo bits per sec
1987 *
1988 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
1989 */
1990static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1991{
1992 if (bw == ICE_SCHED_DFLT_BW) {
1993 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1994 bw_t_info->cir_bw.bw = 0;
1995 } else {
1996 /* Save type of BW information */
1997 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1998 bw_t_info->cir_bw.bw = bw;
1999 }
2000}
2001
2002/**
2003 * ice_set_clear_eir_bw - set or clear EIR BW
2004 * @bw_t_info: bandwidth type information structure
2005 * @bw: bandwidth in Kbps - Kilo bits per sec
2006 *
2007 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
2008 */
2009static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2010{
2011 if (bw == ICE_SCHED_DFLT_BW) {
2012 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2013 bw_t_info->eir_bw.bw = 0;
2014 } else {
2015 /* EIR BW and Shared BW profiles are mutually exclusive and
2016 * hence only one of them may be set for any given element.
2017 * First clear earlier saved shared BW information.
2018 */
2019 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2020 bw_t_info->shared_bw = 0;
2021 /* save EIR BW information */
2022 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2023 bw_t_info->eir_bw.bw = bw;
2024 }
2025}
2026
2027/**
2028 * ice_set_clear_shared_bw - set or clear shared BW
2029 * @bw_t_info: bandwidth type information structure
2030 * @bw: bandwidth in Kbps - Kilo bits per sec
2031 *
2032 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
2033 */
2034static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2035{
2036 if (bw == ICE_SCHED_DFLT_BW) {
2037 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2038 bw_t_info->shared_bw = 0;
2039 } else {
2040 /* EIR BW and Shared BW profiles are mutually exclusive and
2041 * hence only one of them may be set for any given element.
2042 * First clear earlier saved EIR BW information.
2043 */
2044 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2045 bw_t_info->eir_bw.bw = 0;
2046 /* save shared BW information */
2047 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2048 bw_t_info->shared_bw = bw;
2049 }
2050}
2051
2052/**
2053 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
2054 * @bw: bandwidth in Kbps
2055 *
2056 * This function calculates the wakeup parameter of RL profile.
2057 */
2058static u16 ice_sched_calc_wakeup(s32 bw)
2059{
2060 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
2061 s32 wakeup_f_int;
2062 u16 wakeup = 0;
2063
2064 /* Get the wakeup integer value */
2065 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2066 wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
2067 if (wakeup_int > 63) {
2068 wakeup = (u16)((1 << 15) | wakeup_int);
2069 } else {
 /* Calculate the fractional part to 4 decimal places by
 * scaling the integer value with a constant multiplier
 */
2073 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
2074 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
2075 ICE_RL_PROF_FREQUENCY,
2076 bytes_per_sec);
2077
2078 /* Get Fraction value */
2079 wakeup_f = wakeup_a - wakeup_b;
2080
 /* Round up the fractional value */
2082 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
2083 wakeup_f += 1;
2084
2085 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
2086 ICE_RL_PROF_MULTIPLIER);
2087 wakeup |= (u16)(wakeup_int << 9);
2088 wakeup |= (u16)(0x1ff & wakeup_f_int);
2089 }
2090
2091 return wakeup;
2092}
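
/*
 * Layout of the wakeup word returned above: when the integer part exceeds
 * 63, bit 15 is set and the integer value is carried directly in the low
 * bits; otherwise bits 14:9 hold the integer part and bits 8:0 hold the
 * fraction scaled by ICE_RL_PROF_FRACTION. For example (assuming
 * ICE_RL_PROF_FRACTION is 512), wakeup_int = 2 with a fractional part of
 * one half encodes as (2 << 9) | 256.
 */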
2093
2094/**
2095 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
2096 * @bw: bandwidth in Kbps
2097 * @profile: profile parameters to return
2098 *
2099 * This function converts the BW to profile structure format.
2100 */
2101static enum ice_status
2102ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
2103{
2104 enum ice_status status = ICE_ERR_PARAM;
2105 s64 bytes_per_sec, ts_rate, mv_tmp;
2106 bool found = false;
2107 s32 encode = 0;
2108 s64 mv = 0;
2109 s32 i;
2110
 /* BW settings range is from 0.5 Mbps to 100 Gbps */
2112 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
2113 return status;
2114
2115 /* Bytes per second from Kbps */
2116 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2117
 /* encode is 6 bits, but only 5 of them are really useful */
2119 for (i = 0; i < 64; i++) {
2120 u64 pow_result = BIT_ULL(i);
2121
2122 ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
2123 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
2124 if (ts_rate <= 0)
2125 continue;
2126
2127 /* Multiplier value */
2128 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
2129 ts_rate);
2130
2131 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
2132 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
2133
2134 /* First multiplier value greater than the given
2135 * accuracy bytes
2136 */
2137 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
2138 encode = i;
2139 found = true;
2140 break;
2141 }
2142 }
2143 if (found) {
2144 u16 wm;
2145
2146 wm = ice_sched_calc_wakeup(bw);
2147 profile->rl_multiply = cpu_to_le16(mv);
2148 profile->wake_up_calc = cpu_to_le16(wm);
2149 profile->rl_encode = cpu_to_le16(encode);
2150 status = 0;
2151 } else {
2152 status = ICE_ERR_DOES_NOT_EXIST;
2153 }
2154
2155 return status;
2156}
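
/*
 * Illustrative view of the search above: the loop walks the power-of-two
 * timeslice encodings from the fastest rate downward and stops at the
 * first one whose multiplier exceeds ICE_RL_PROF_ACCURACY_BYTES, so the
 * returned (rl_multiply, rl_encode) pair is the coarsest encoding that
 * still represents the requested BW within the accuracy bound. If, say,
 * encodes 0..3 all yield multipliers at or below the accuracy bytes and
 * encode 4 yields a larger one, then rl_encode is 4.
 */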
2157
2158/**
2159 * ice_sched_add_rl_profile - add RL profile
2160 * @pi: port information structure
2161 * @rl_type: type of rate limit BW - min, max, or shared
2162 * @bw: bandwidth in Kbps - Kilo bits per sec
2163 * @layer_num: specifies in which layer to create profile
2164 *
2165 * This function first checks the existing list for corresponding BW
2166 * parameter. If it exists, it returns the associated profile otherwise
2167 * it creates a new rate limit profile for requested BW, and adds it to
2168 * the HW DB and local list. It returns the new profile or null on error.
2169 * The caller needs to hold the scheduler lock.
2170 */
2171static struct ice_aqc_rl_profile_info *
2172ice_sched_add_rl_profile(struct ice_port_info *pi,
2173 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2174{
2175 struct ice_aqc_rl_profile_info *rl_prof_elem;
2176 u16 profiles_added = 0, num_profiles = 1;
2177 struct ice_aqc_rl_profile_elem *buf;
2178 enum ice_status status;
2179 struct ice_hw *hw;
2180 u8 profile_type;
2181
2182 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2183 return NULL;
2184 switch (rl_type) {
2185 case ICE_MIN_BW:
2186 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2187 break;
2188 case ICE_MAX_BW:
2189 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2190 break;
2191 case ICE_SHARED_BW:
2192 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2193 break;
2194 default:
2195 return NULL;
2196 }
2197
2198 if (!pi)
2199 return NULL;
2200 hw = pi->hw;
2201 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2202 list_entry)
2203 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
2204 profile_type && rl_prof_elem->bw == bw)
2205 /* Return existing profile ID info */
2206 return rl_prof_elem;
2207
2208 /* Create new profile ID */
2209 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
2210 GFP_KERNEL);
2211
2212 if (!rl_prof_elem)
2213 return NULL;
2214
2215 status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
2216 if (status)
2217 goto exit_add_rl_prof;
2218
2219 rl_prof_elem->bw = bw;
 /* layer_num is zero-based, while FW expects levels from 1 to 9 */
2221 rl_prof_elem->profile.level = layer_num + 1;
2222 rl_prof_elem->profile.flags = profile_type;
2223 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
2224
2225 /* Create new entry in HW DB */
2226 buf = &rl_prof_elem->profile;
2227 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
2228 &profiles_added, NULL);
2229 if (status || profiles_added != num_profiles)
2230 goto exit_add_rl_prof;
2231
2232 /* Good entry - add in the list */
2233 rl_prof_elem->prof_id_ref = 0;
2234 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
2235 return rl_prof_elem;
2236
2237exit_add_rl_prof:
2238 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
2239 return NULL;
2240}
2241
2242/**
2243 * ice_sched_cfg_node_bw_lmt - configure node sched params
2244 * @hw: pointer to the HW struct
2245 * @node: sched node to configure
2246 * @rl_type: rate limit type CIR, EIR, or shared
2247 * @rl_prof_id: rate limit profile ID
2248 *
2249 * This function configures node element's BW limit.
2250 */
2251static enum ice_status
2252ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
2253 enum ice_rl_type rl_type, u16 rl_prof_id)
2254{
2255 struct ice_aqc_txsched_elem_data buf;
2256 struct ice_aqc_txsched_elem *data;
2257
2258 buf = node->info;
2259 data = &buf.data;
2260 switch (rl_type) {
2261 case ICE_MIN_BW:
2262 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2263 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2264 break;
2265 case ICE_MAX_BW:
2266 /* EIR BW and Shared BW profiles are mutually exclusive and
2267 * hence only one of them may be set for any given element
2268 */
2269 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2270 return ICE_ERR_CFG;
2271 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2272 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2273 break;
2274 case ICE_SHARED_BW:
2275 /* Check for removing shared BW */
2276 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
2277 /* remove shared profile */
2278 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
2279 data->srl_id = 0; /* clear SRL field */
2280
2281 /* enable back EIR to default profile */
2282 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2283 data->eir_bw.bw_profile_idx =
2284 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
2285 break;
2286 }
2287 /* EIR BW and Shared BW profiles are mutually exclusive and
2288 * hence only one of them may be set for any given element
2289 */
2290 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
2291 (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
2292 ICE_SCHED_DFLT_RL_PROF_ID))
2293 return ICE_ERR_CFG;
2294 /* EIR BW is set to default, disable it */
2295 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
2296 /* Okay to enable shared BW now */
2297 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
2298 data->srl_id = cpu_to_le16(rl_prof_id);
2299 break;
2300 default:
2301 /* Unknown rate limit type */
2302 return ICE_ERR_PARAM;
2303 }
2304
2305 /* Configure element */
2306 return ice_sched_update_elem(hw, node, &buf);
2307}
2308
2309/**
2310 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
2311 * @node: sched node
2312 * @rl_type: rate limit type
2313 *
2314 * If existing profile matches, it returns the corresponding rate
2315 * limit profile ID, otherwise it returns an invalid ID as error.
2316 */
2317static u16
2318ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
2319 enum ice_rl_type rl_type)
2320{
2321 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
2322 struct ice_aqc_txsched_elem *data;
2323
2324 data = &node->info.data;
2325 switch (rl_type) {
2326 case ICE_MIN_BW:
2327 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
2328 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
2329 break;
2330 case ICE_MAX_BW:
2331 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
2332 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
2333 break;
2334 case ICE_SHARED_BW:
2335 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2336 rl_prof_id = le16_to_cpu(data->srl_id);
2337 break;
2338 default:
2339 break;
2340 }
2341
2342 return rl_prof_id;
2343}
2344
2345/**
2346 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
2347 * @pi: port information structure
2348 * @rl_type: type of rate limit BW - min, max, or shared
2349 * @layer_index: layer index
2350 *
2351 * This function returns requested profile creation layer.
2352 */
2353static u8
2354ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
2355 u8 layer_index)
2356{
2357 struct ice_hw *hw = pi->hw;
2358
2359 if (layer_index >= hw->num_tx_sched_layers)
2360 return ICE_SCHED_INVAL_LAYER_NUM;
2361 switch (rl_type) {
2362 case ICE_MIN_BW:
2363 if (hw->layer_info[layer_index].max_cir_rl_profiles)
2364 return layer_index;
2365 break;
2366 case ICE_MAX_BW:
2367 if (hw->layer_info[layer_index].max_eir_rl_profiles)
2368 return layer_index;
2369 break;
2370 case ICE_SHARED_BW:
2371 /* if current layer doesn't support SRL profile creation
2372 * then try a layer up or down.
2373 */
2374 if (hw->layer_info[layer_index].max_srl_profiles)
2375 return layer_index;
2376 else if (layer_index < hw->num_tx_sched_layers - 1 &&
2377 hw->layer_info[layer_index + 1].max_srl_profiles)
2378 return layer_index + 1;
2379 else if (layer_index > 0 &&
2380 hw->layer_info[layer_index - 1].max_srl_profiles)
2381 return layer_index - 1;
2382 break;
2383 default:
2384 break;
2385 }
2386 return ICE_SCHED_INVAL_LAYER_NUM;
2387}
2388
2389/**
2390 * ice_sched_get_srl_node - get shared rate limit node
2391 * @node: tree node
2392 * @srl_layer: shared rate limit layer
2393 *
2394 * This function returns SRL node to be used for shared rate limit purpose.
2395 * The caller needs to hold scheduler lock.
2396 */
2397static struct ice_sched_node *
2398ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
2399{
2400 if (srl_layer > node->tx_sched_layer)
2401 return node->children[0];
2402 else if (srl_layer < node->tx_sched_layer)
 /* A node can't be created without a parent, so every node
 * except the root has a valid parent.
 */
2406 return node->parent;
2407 else
2408 return node;
2409}
2410
2411/**
2412 * ice_sched_rm_rl_profile - remove RL profile ID
2413 * @pi: port information structure
2414 * @layer_num: layer number where profiles are saved
2415 * @profile_type: profile type like EIR, CIR, or SRL
2416 * @profile_id: profile ID to remove
2417 *
2418 * This function removes rate limit profile from layer 'layer_num' of type
2419 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
2420 * scheduler lock.
2421 */
2422static enum ice_status
2423ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
2424 u16 profile_id)
2425{
2426 struct ice_aqc_rl_profile_info *rl_prof_elem;
2427 enum ice_status status = 0;
2428
2429 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2430 return ICE_ERR_PARAM;
2431 /* Check the existing list for RL profile */
2432 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2433 list_entry)
2434 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
2435 profile_type &&
2436 le16_to_cpu(rl_prof_elem->profile.profile_id) ==
2437 profile_id) {
2438 if (rl_prof_elem->prof_id_ref)
2439 rl_prof_elem->prof_id_ref--;
2440
2441 /* Remove old profile ID from database */
2442 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
2443 if (status && status != ICE_ERR_IN_USE)
2444 ice_debug(pi->hw, ICE_DBG_SCHED,
2445 "Remove rl profile failed\n");
2446 break;
2447 }
2448 if (status == ICE_ERR_IN_USE)
2449 status = 0;
2450 return status;
2451}
2452
2453/**
2454 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
2455 * @pi: port information structure
2456 * @node: pointer to node structure
2457 * @rl_type: rate limit type min, max, or shared
2458 * @layer_num: layer number where RL profiles are saved
2459 *
2460 * This function configures node element's BW rate limit profile ID of
2461 * type CIR, EIR, or SRL to default. This function needs to be called
2462 * with the scheduler lock held.
2463 */
2464static enum ice_status
2465ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
2466 struct ice_sched_node *node,
2467 enum ice_rl_type rl_type, u8 layer_num)
2468{
2469 enum ice_status status;
2470 struct ice_hw *hw;
2471 u8 profile_type;
2472 u16 rl_prof_id;
2473 u16 old_id;
2474
2475 hw = pi->hw;
2476 switch (rl_type) {
2477 case ICE_MIN_BW:
2478 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2479 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2480 break;
2481 case ICE_MAX_BW:
2482 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2483 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2484 break;
2485 case ICE_SHARED_BW:
2486 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2487 /* No SRL is configured for default case */
2488 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
2489 break;
2490 default:
2491 return ICE_ERR_PARAM;
2492 }
2493 /* Save existing RL prof ID for later clean up */
2494 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2495 /* Configure BW scheduling parameters */
2496 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2497 if (status)
2498 return status;
2499
2500 /* Remove stale RL profile ID */
2501 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
2502 old_id == ICE_SCHED_INVAL_PROF_ID)
2503 return 0;
2504
2505 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
2506}
2507
2508/**
2509 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
2510 * @pi: port information structure
2511 * @node: pointer to node structure
2512 * @layer_num: layer number where rate limit profiles are saved
2513 * @rl_type: rate limit type min, max, or shared
2514 * @bw: bandwidth value
2515 *
2516 * This function prepares node element's bandwidth to SRL or EIR exclusively.
2517 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
2518 * them may be set for any given element. This function needs to be called
2519 * with the scheduler lock held.
2520 */
2521static enum ice_status
2522ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
2523 struct ice_sched_node *node,
2524 u8 layer_num, enum ice_rl_type rl_type, u32 bw)
2525{
2526 if (rl_type == ICE_SHARED_BW) {
 /* An SRL node is passed in this case; it may be a different node */
2528 if (bw == ICE_SCHED_DFLT_BW)
2529 /* SRL being removed, ice_sched_cfg_node_bw_lmt()
2530 * enables EIR to default. EIR is not set in this
2531 * case, so no additional action is required.
2532 */
2533 return 0;
2534
2535 /* SRL being configured, set EIR to default here.
2536 * ice_sched_cfg_node_bw_lmt() disables EIR when it
2537 * configures SRL
2538 */
2539 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
2540 layer_num);
2541 } else if (rl_type == ICE_MAX_BW &&
2542 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
2543 /* Remove Shared profile. Set default shared BW call
2544 * removes shared profile for a node.
2545 */
2546 return ice_sched_set_node_bw_dflt(pi, node,
2547 ICE_SHARED_BW,
2548 layer_num);
2549 }
2550 return 0;
2551}
2552
2553/**
2554 * ice_sched_set_node_bw - set node's bandwidth
2555 * @pi: port information structure
2556 * @node: tree node
2557 * @rl_type: rate limit type min, max, or shared
2558 * @bw: bandwidth in Kbps - Kilo bits per sec
2559 * @layer_num: layer number
2560 *
2561 * This function adds new profile corresponding to requested BW, configures
2562 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
2563 * ID from local database. The caller needs to hold scheduler lock.
2564 */
2565static enum ice_status
2566ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
2567 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2568{
2569 struct ice_aqc_rl_profile_info *rl_prof_info;
2570 enum ice_status status = ICE_ERR_PARAM;
2571 struct ice_hw *hw = pi->hw;
2572 u16 old_id, rl_prof_id;
2573
2574 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
2575 if (!rl_prof_info)
2576 return status;
2577
2578 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
2579
2580 /* Save existing RL prof ID for later clean up */
2581 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2582 /* Configure BW scheduling parameters */
2583 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2584 if (status)
2585 return status;
2586
 /* The new changes have been applied */
 /* Increment the profile ID reference count */
2589 rl_prof_info->prof_id_ref++;
2590
2591 /* Check for old ID removal */
2592 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
2593 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
2594 return 0;
2595
2596 return ice_sched_rm_rl_profile(pi, layer_num,
2597 rl_prof_info->profile.flags &
2598 ICE_AQC_RL_PROFILE_TYPE_M, old_id);
2599}
2600
2601/**
2602 * ice_sched_set_node_bw_lmt - set node's BW limit
2603 * @pi: port information structure
2604 * @node: tree node
2605 * @rl_type: rate limit type min, max, or shared
2606 * @bw: bandwidth in Kbps - Kilo bits per sec
2607 *
2608 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
2609 * EIR, or SRL. The caller needs to hold scheduler lock.
2610 */
2611static enum ice_status
2612ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
2613 enum ice_rl_type rl_type, u32 bw)
2614{
2615 struct ice_sched_node *cfg_node = node;
 enum ice_status status;
 struct ice_hw *hw;
2619 u8 layer_num;
2620
2621 if (!pi)
2622 return ICE_ERR_PARAM;
2623 hw = pi->hw;
2624 /* Remove unused RL profile IDs from HW and SW DB */
2625 ice_sched_rm_unused_rl_prof(pi);
2626 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
2627 node->tx_sched_layer);
2628 if (layer_num >= hw->num_tx_sched_layers)
2629 return ICE_ERR_PARAM;
2630
2631 if (rl_type == ICE_SHARED_BW) {
2632 /* SRL node may be different */
2633 cfg_node = ice_sched_get_srl_node(node, layer_num);
2634 if (!cfg_node)
2635 return ICE_ERR_CFG;
2636 }
2637 /* EIR BW and Shared BW profiles are mutually exclusive and
2638 * hence only one of them may be set for any given element
2639 */
2640 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
2641 bw);
2642 if (status)
2643 return status;
2644 if (bw == ICE_SCHED_DFLT_BW)
2645 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
2646 layer_num);
2647 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
2648}
2649
2650/**
2651 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
2652 * @pi: port information structure
2653 * @node: pointer to node structure
2654 * @rl_type: rate limit type min, max, or shared
2655 *
2656 * This function configures node element's BW rate limit profile ID of
2657 * type CIR, EIR, or SRL to default. This function needs to be called
2658 * with the scheduler lock held.
2659 */
2660static enum ice_status
2661ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
2662 struct ice_sched_node *node,
2663 enum ice_rl_type rl_type)
2664{
2665 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
2666 ICE_SCHED_DFLT_BW);
2667}
2668
2669/**
2670 * ice_sched_validate_srl_node - Check node for SRL applicability
2671 * @node: sched node to configure
2672 * @sel_layer: selected SRL layer
2673 *
2674 * This function checks if the SRL can be applied to a selected layer node on
2675 * behalf of the requested node (first argument). This function needs to be
2676 * called with scheduler lock held.
2677 */
2678static enum ice_status
2679ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
2680{
2681 /* SRL profiles are not available on all layers. Check if the
2682 * SRL profile can be applied to a node above or below the
2683 * requested node. SRL configuration is possible only if the
2684 * selected layer's node has single child.
2685 */
2686 if (sel_layer == node->tx_sched_layer ||
2687 ((sel_layer == node->tx_sched_layer + 1) &&
2688 node->num_children == 1) ||
2689 ((sel_layer == node->tx_sched_layer - 1) &&
2690 (node->parent && node->parent->num_children == 1)))
2691 return 0;
2692
2693 return ICE_ERR_CFG;
2694}
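
/*
 * Example of the rule above (hypothetical layers): for a node at layer 5
 * with sel_layer 4, SRL may be applied only when the node's parent has
 * exactly one child (this node); with sel_layer 6 the node itself must
 * have exactly one child. When sel_layer equals the node's own layer, no
 * such restriction applies.
 */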
2695
2696/**
2697 * ice_sched_save_q_bw - save queue node's BW information
2698 * @q_ctx: queue context structure
2699 * @rl_type: rate limit type min, max, or shared
2700 * @bw: bandwidth in Kbps - Kilo bits per sec
2701 *
2702 * Save BW information of queue type node for post replay use.
2703 */
2704static enum ice_status
2705ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
2706{
2707 switch (rl_type) {
2708 case ICE_MIN_BW:
2709 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
2710 break;
2711 case ICE_MAX_BW:
2712 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
2713 break;
2714 case ICE_SHARED_BW:
2715 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
2716 break;
2717 default:
2718 return ICE_ERR_PARAM;
2719 }
2720 return 0;
2721}
2722
2723/**
2724 * ice_sched_set_q_bw_lmt - sets queue BW limit
2725 * @pi: port information structure
2726 * @vsi_handle: sw VSI handle
2727 * @tc: traffic class
2728 * @q_handle: software queue handle
2729 * @rl_type: min, max, or shared
2730 * @bw: bandwidth in Kbps
2731 *
2732 * This function sets BW limit of queue scheduling node.
2733 */
2734static enum ice_status
2735ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2736 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2737{
2738 enum ice_status status = ICE_ERR_PARAM;
2739 struct ice_sched_node *node;
2740 struct ice_q_ctx *q_ctx;
2741
2742 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2743 return ICE_ERR_PARAM;
2744 mutex_lock(&pi->sched_lock);
2745 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
2746 if (!q_ctx)
2747 goto exit_q_bw_lmt;
2748 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2749 if (!node) {
2750 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
2751 goto exit_q_bw_lmt;
2752 }
2753
2754 /* Return error if it is not a leaf node */
2755 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
2756 goto exit_q_bw_lmt;
2757
2758 /* SRL bandwidth layer selection */
2759 if (rl_type == ICE_SHARED_BW) {
2760 u8 sel_layer; /* selected layer */
2761
2762 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
2763 node->tx_sched_layer);
2764 if (sel_layer >= pi->hw->num_tx_sched_layers) {
2765 status = ICE_ERR_PARAM;
2766 goto exit_q_bw_lmt;
2767 }
2768 status = ice_sched_validate_srl_node(node, sel_layer);
2769 if (status)
2770 goto exit_q_bw_lmt;
2771 }
2772
2773 if (bw == ICE_SCHED_DFLT_BW)
2774 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
2775 else
2776 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
2777
2778 if (!status)
2779 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
2780
2781exit_q_bw_lmt:
2782 mutex_unlock(&pi->sched_lock);
2783 return status;
2784}
2785
2786/**
2787 * ice_cfg_q_bw_lmt - configure queue BW limit
2788 * @pi: port information structure
2789 * @vsi_handle: sw VSI handle
2790 * @tc: traffic class
2791 * @q_handle: software queue handle
2792 * @rl_type: min, max, or shared
2793 * @bw: bandwidth in Kbps
2794 *
2795 * This function configures BW limit of queue scheduling node.
2796 */
2797enum ice_status
2798ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2799 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2800{
2801 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2802 bw);
2803}
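
/*
 * Usage sketch (illustrative; the handles are hypothetical): capping a
 * LAN queue's Tx rate at 100 Mbps, with bw expressed in Kbps:
 *
 *	status = ice_cfg_q_bw_lmt(pi, vsi_handle, tc, q_handle,
 *				  ICE_MAX_BW, 100000);
 *	if (status)
 *		return status;
 */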
2804
2805/**
2806 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
2807 * @pi: port information structure
2808 * @vsi_handle: sw VSI handle
2809 * @tc: traffic class
2810 * @q_handle: software queue handle
2811 * @rl_type: min, max, or shared
2812 *
2813 * This function configures BW default limit of queue scheduling node.
2814 */
2815enum ice_status
2816ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2817 u16 q_handle, enum ice_rl_type rl_type)
2818{
2819 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2820 ICE_SCHED_DFLT_BW);
2821}
2822
2823/**
2824 * ice_cfg_rl_burst_size - Set burst size value
2825 * @hw: pointer to the HW struct
2826 * @bytes: burst size in bytes
2827 *
 * This function configures/sets the burst size to the requested new value.
 * The new burst size value is used for future rate limit calls. It doesn't
 * change the existing or previously created RL profiles.
2831 */
2832enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
2833{
2834 u16 burst_size_to_prog;
2835
2836 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
2837 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
2838 return ICE_ERR_PARAM;
2839 if (ice_round_to_num(bytes, 64) <=
2840 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
2841 /* 64 byte granularity case */
2842 /* Disable MSB granularity bit */
2843 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
2844 /* round number to nearest 64 byte granularity */
2845 bytes = ice_round_to_num(bytes, 64);
2846 /* The value is in 64 byte chunks */
2847 burst_size_to_prog |= (u16)(bytes / 64);
2848 } else {
2849 /* k bytes granularity case */
2850 /* Enable MSB granularity bit */
2851 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
2852 /* round number to nearest 1024 granularity */
2853 bytes = ice_round_to_num(bytes, 1024);
2854 /* check rounding doesn't go beyond allowed */
2855 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
2856 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
2857 /* The value is in k bytes */
2858 burst_size_to_prog |= (u16)(bytes / 1024);
2859 }
2860 hw->max_burst_size = burst_size_to_prog;
2861 return 0;
2862}
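
/*
 * Worked example (illustrative, assuming 10240 bytes falls within the
 * 64-byte-granularity range): a request of 10240 bytes keeps the MSB
 * granularity bit clear and programs the size in 64-byte chunks, i.e.
 * 10240 / 64 = 160. A request beyond that range instead sets the MSB bit
 * and programs the size in 1024-byte units.
 */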
2863
2864/**
2865 * ice_sched_replay_node_prio - re-configure node priority
2866 * @hw: pointer to the HW struct
2867 * @node: sched node to configure
2868 * @priority: priority value
2869 *
2870 * This function configures node element's priority value. It
2871 * needs to be called with scheduler lock held.
2872 */
2873static enum ice_status
2874ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
2875 u8 priority)
2876{
2877 struct ice_aqc_txsched_elem_data buf;
2878 struct ice_aqc_txsched_elem *data;
2879 enum ice_status status;
2880
2881 buf = node->info;
2882 data = &buf.data;
2883 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
2884 data->generic = priority;
2885
2886 /* Configure element */
2887 status = ice_sched_update_elem(hw, node, &buf);
2888 return status;
2889}
2890
2891/**
2892 * ice_sched_replay_node_bw - replay node(s) BW
2893 * @hw: pointer to the HW struct
2894 * @node: sched node to configure
2895 * @bw_t_info: BW type information
2896 *
2897 * This function restores node's BW from bw_t_info. The caller needs
2898 * to hold the scheduler lock.
2899 */
2900static enum ice_status
2901ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
2902 struct ice_bw_type_info *bw_t_info)
2903{
2904 struct ice_port_info *pi = hw->port_info;
2905 enum ice_status status = ICE_ERR_PARAM;
2906 u16 bw_alloc;
2907
2908 if (!node)
2909 return status;
2910 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
2911 return 0;
2912 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
2913 status = ice_sched_replay_node_prio(hw, node,
2914 bw_t_info->generic);
2915 if (status)
2916 return status;
2917 }
2918 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
2919 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
2920 bw_t_info->cir_bw.bw);
2921 if (status)
2922 return status;
2923 }
2924 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
2925 bw_alloc = bw_t_info->cir_bw.bw_alloc;
2926 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
2927 bw_alloc);
2928 if (status)
2929 return status;
2930 }
2931 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
2932 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
2933 bw_t_info->eir_bw.bw);
2934 if (status)
2935 return status;
2936 }
2937 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
2938 bw_alloc = bw_t_info->eir_bw.bw_alloc;
2939 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
2940 bw_alloc);
2941 if (status)
2942 return status;
2943 }
2944 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
2945 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
2946 bw_t_info->shared_bw);
2947 return status;
2948}
2949
2950/**
2951 * ice_sched_replay_q_bw - replay queue type node BW
2952 * @pi: port information structure
2953 * @q_ctx: queue context structure
2954 *
2955 * This function replays queue type node bandwidth. This function needs to be
2956 * called with scheduler lock held.
2957 */
2958enum ice_status
2959ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
2960{
2961 struct ice_sched_node *q_node;
2962
 /* The following call also verifies that the node is present in the tree */
2964 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2965 if (!q_node)
2966 return ICE_ERR_PARAM;
2967 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
2968}