v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_sched.h"
   5
   6/**
   7 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
   8 * @pi: port information structure
   9 * @info: Scheduler element information from firmware
  10 *
  11 * This function inserts the root node of the scheduling tree topology
  12 * to the SW DB.
  13 */
  14static enum ice_status
  15ice_sched_add_root_node(struct ice_port_info *pi,
  16			struct ice_aqc_txsched_elem_data *info)
  17{
  18	struct ice_sched_node *root;
  19	struct ice_hw *hw;
  20	u16 max_children;
  21
  22	if (!pi)
  23		return ICE_ERR_PARAM;
  24
  25	hw = pi->hw;
  26
  27	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
  28	if (!root)
  29		return ICE_ERR_NO_MEMORY;
  30
  31	max_children = le16_to_cpu(hw->layer_info[0].max_children);
  32	root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
  33				      sizeof(*root), GFP_KERNEL);
  34	if (!root->children) {
  35		devm_kfree(ice_hw_to_dev(hw), root);
  36		return ICE_ERR_NO_MEMORY;
  37	}
  38
  39	memcpy(&root->info, info, sizeof(*info));
  40	pi->root = root;
  41	return 0;
  42}
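/* A note on the devm_kcalloc() above: root->children is an array of
 * pointers, so sizeof(*root->children) would suffice; sizeof(*root)
 * over-allocates (a full node per slot) but is harmless. The v5.4
 * listing below keeps the same expression and marks it with a
 * coverity[suspicious_sizeof] annotation.
 */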
  43
  44/**
  45 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
  46 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
  47 * @teid: node teid to search
  48 *
  49 * This function searches for a node matching the teid in the scheduling tree
  50 * from the SW DB. The search is recursive and is restricted by the number of
   51 * layers it has searched through, stopping at the max supported layer.
  52 *
  53 * This function needs to be called when holding the port_info->sched_lock
  54 */
  55struct ice_sched_node *
  56ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
  57{
  58	u16 i;
  59
  60	/* The TEID is same as that of the start_node */
  61	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
  62		return start_node;
  63
  64	/* The node has no children or is at the max layer */
  65	if (!start_node->num_children ||
  66	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
  67	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
  68		return NULL;
  69
  70	/* Check if teid matches to any of the children nodes */
  71	for (i = 0; i < start_node->num_children; i++)
  72		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
  73			return start_node->children[i];
  74
  75	/* Search within each child's sub-tree */
  76	for (i = 0; i < start_node->num_children; i++) {
  77		struct ice_sched_node *tmp;
  78
  79		tmp = ice_sched_find_node_by_teid(start_node->children[i],
  80						  teid);
  81		if (tmp)
  82			return tmp;
  83	}
  84
  85	return NULL;
  86}
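/* Illustrative usage sketch (not part of the driver): callers look a node
 * up by TEID while holding the scheduler lock, per the kernel-doc above;
 * 'pi' and 'teid' are assumed to come from the caller.
 *
 *	struct ice_sched_node *node;
 *
 *	mutex_lock(&pi->sched_lock);
 *	node = ice_sched_find_node_by_teid(pi->root, teid);
 *	mutex_unlock(&pi->sched_lock);
 *	if (!node)
 *		return NULL;
 */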
  87
  88/**
  89 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
  90 * @pi: port information structure
  91 * @layer: Scheduler layer of the node
  92 * @info: Scheduler element information from firmware
  93 *
  94 * This function inserts a scheduler node to the SW DB.
  95 */
  96enum ice_status
  97ice_sched_add_node(struct ice_port_info *pi, u8 layer,
  98		   struct ice_aqc_txsched_elem_data *info)
  99{
 100	struct ice_sched_node *parent;
  101	struct ice_sched_node *node;
 102	struct ice_hw *hw;
 103	u16 max_children;
 104
 105	if (!pi)
 106		return ICE_ERR_PARAM;
 107
 108	hw = pi->hw;
 109
 110	/* A valid parent node should be there */
 111	parent = ice_sched_find_node_by_teid(pi->root,
 112					     le32_to_cpu(info->parent_teid));
 113	if (!parent) {
 114		ice_debug(hw, ICE_DBG_SCHED,
 115			  "Parent Node not found for parent_teid=0x%x\n",
 116			  le32_to_cpu(info->parent_teid));
 117		return ICE_ERR_PARAM;
 118	}
 119
 120	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
 121	if (!node)
 122		return ICE_ERR_NO_MEMORY;
 123	max_children = le16_to_cpu(hw->layer_info[layer].max_children);
 124	if (max_children) {
 125		node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
 126					      sizeof(*node), GFP_KERNEL);
 127		if (!node->children) {
 128			devm_kfree(ice_hw_to_dev(hw), node);
 129			return ICE_ERR_NO_MEMORY;
 130		}
 131	}
 132
 133	node->in_use = true;
 134	node->parent = parent;
 135	node->tx_sched_layer = layer;
 136	parent->children[parent->num_children++] = node;
 137	memcpy(&node->info, info, sizeof(*info));
 138	return 0;
 139}
 140
 141/**
 142 * ice_aq_delete_sched_elems - delete scheduler elements
 143 * @hw: pointer to the hw struct
 144 * @grps_req: number of groups to delete
 145 * @buf: pointer to buffer
 146 * @buf_size: buffer size in bytes
 147 * @grps_del: returns total number of elements deleted
 148 * @cd: pointer to command details structure or NULL
 149 *
 150 * Delete scheduling elements (0x040F)
 151 */
 152static enum ice_status
 153ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
 154			  struct ice_aqc_delete_elem *buf, u16 buf_size,
 155			  u16 *grps_del, struct ice_sq_cd *cd)
 156{
 157	struct ice_aqc_add_move_delete_elem *cmd;
 158	struct ice_aq_desc desc;
 159	enum ice_status status;
 160
 161	cmd = &desc.params.add_move_delete_elem;
 162	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
 163	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 164	cmd->num_grps_req = cpu_to_le16(grps_req);
 165
 166	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 167	if (!status && grps_del)
 168		*grps_del = le16_to_cpu(cmd->num_grps_updated);
 169
 170	return status;
 171}
 172
 173/**
 174 * ice_sched_remove_elems - remove nodes from hw
 175 * @hw: pointer to the hw struct
 176 * @parent: pointer to the parent node
 177 * @num_nodes: number of nodes
 178 * @node_teids: array of node teids to be deleted
 179 *
  180 * This function removes nodes from hw
 181 */
 182static enum ice_status
 183ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
 184		       u16 num_nodes, u32 *node_teids)
 185{
 186	struct ice_aqc_delete_elem *buf;
 187	u16 i, num_groups_removed = 0;
 188	enum ice_status status;
 189	u16 buf_size;
 190
 191	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
 192	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 193	if (!buf)
 194		return ICE_ERR_NO_MEMORY;
 195	buf->hdr.parent_teid = parent->info.node_teid;
 196	buf->hdr.num_elems = cpu_to_le16(num_nodes);
 197	for (i = 0; i < num_nodes; i++)
 198		buf->teid[i] = cpu_to_le32(node_teids[i]);
 199	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
 200					   &num_groups_removed, NULL);
 201	if (status || num_groups_removed != 1)
 202		ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
 203	devm_kfree(ice_hw_to_dev(hw), buf);
 204	return status;
 205}
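/* buf ends in a flexible array of TEIDs, and sizeof(*buf) already covers
 * one element, hence the (num_nodes - 1) in the buf_size math above.
 * Worked example: for num_nodes = 4, three extra u32 slots (12 bytes) are
 * added on top of sizeof(*buf), giving room for teid[0..3].
 */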
 206
 207/**
 208 * ice_sched_get_first_node - get the first node of the given layer
 209 * @hw: pointer to the hw struct
 210 * @parent: pointer the base node of the subtree
 211 * @layer: layer number
 212 *
 213 * This function retrieves the first node of the given layer from the subtree
 214 */
 215static struct ice_sched_node *
 216ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
 217			 u8 layer)
 218{
 219	u8 i;
 220
 221	if (layer < hw->sw_entry_point_layer)
 222		return NULL;
 223	for (i = 0; i < parent->num_children; i++) {
 224		struct ice_sched_node *node = parent->children[i];
 225
 226		if (node) {
 227			if (node->tx_sched_layer == layer)
 228				return node;
  229			/* this recursion is intentional, and won't
  230			 * go more than 9 calls deep
  231			 */
 232			return ice_sched_get_first_node(hw, node, layer);
 233		}
 234	}
 235	return NULL;
 236}
 237
 238/**
 239 * ice_sched_get_tc_node - get pointer to TC node
 240 * @pi: port information structure
 241 * @tc: TC number
 242 *
 243 * This function returns the TC node pointer
 244 */
 245struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
 246{
 247	u8 i;
 248
 249	if (!pi)
 250		return NULL;
 251	for (i = 0; i < pi->root->num_children; i++)
 252		if (pi->root->children[i]->tc_num == tc)
 253			return pi->root->children[i];
 254	return NULL;
 255}
 256
 257/**
 258 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 259 * @pi: port information structure
 260 * @node: pointer to the ice_sched_node struct
 261 *
 262 * This function frees up a node from SW DB as well as from HW
 263 *
 264 * This function needs to be called with the port_info->sched_lock held
 265 */
 266void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
 267{
 268	struct ice_sched_node *parent;
 269	struct ice_hw *hw = pi->hw;
 270	u8 i, j;
 271
 272	/* Free the children before freeing up the parent node
 273	 * The parent array is updated below and that shifts the nodes
 274	 * in the array. So always pick the first child if num children > 0
 275	 */
 276	while (node->num_children)
 277		ice_free_sched_node(pi, node->children[0]);
 278
 279	/* Leaf, TC and root nodes can't be deleted by SW */
 280	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
 281	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
 282	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
 283	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
 284		u32 teid = le32_to_cpu(node->info.node_teid);
 285		enum ice_status status;
 286
 287		status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
 288		if (status)
 289			ice_debug(hw, ICE_DBG_SCHED,
 290				  "remove element failed %d\n", status);
 291	}
 292	parent = node->parent;
 293	/* root has no parent */
 294	if (parent) {
 295		struct ice_sched_node *p, *tc_node;
 296
 297		/* update the parent */
 298		for (i = 0; i < parent->num_children; i++)
 299			if (parent->children[i] == node) {
 300				for (j = i + 1; j < parent->num_children; j++)
 301					parent->children[j - 1] =
 302						parent->children[j];
 303				parent->num_children--;
 304				break;
 305			}
 306
 307		/* search for previous sibling that points to this node and
 308		 * remove the reference
 309		 */
 310		tc_node = ice_sched_get_tc_node(pi, node->tc_num);
 311		if (!tc_node) {
 312			ice_debug(hw, ICE_DBG_SCHED,
 313				  "Invalid TC number %d\n", node->tc_num);
 314			goto err_exit;
 315		}
 316		p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
 317		while (p) {
 318			if (p->sibling == node) {
 319				p->sibling = node->sibling;
 320				break;
 321			}
 322			p = p->sibling;
 323		}
 324	}
 325err_exit:
 326	/* leaf nodes have no children */
 327	if (node->children)
 328		devm_kfree(ice_hw_to_dev(hw), node->children);
 329	devm_kfree(ice_hw_to_dev(hw), node);
 330}
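/* Usage sketch: because children are freed recursively before the node
 * itself, tearing down the whole tree is a single call on the root, as
 * ice_sched_clear_tx_topo() does later in this file:
 *
 *	if (pi->root) {
 *		ice_free_sched_node(pi, pi->root);
 *		pi->root = NULL;
 *	}
 */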
 331
 332/**
 333 * ice_aq_get_dflt_topo - gets default scheduler topology
 334 * @hw: pointer to the hw struct
 335 * @lport: logical port number
 336 * @buf: pointer to buffer
 337 * @buf_size: buffer size in bytes
 338 * @num_branches: returns total number of queue to port branches
 339 * @cd: pointer to command details structure or NULL
 340 *
  341 * Get default scheduler topology (0x0400)
 342 */
 343static enum ice_status
 344ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
 345		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
 346		     u8 *num_branches, struct ice_sq_cd *cd)
 347{
 348	struct ice_aqc_get_topo *cmd;
 349	struct ice_aq_desc desc;
 350	enum ice_status status;
 351
 352	cmd = &desc.params.get_topo;
 353	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
 354	cmd->port_num = lport;
 355	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 356	if (!status && num_branches)
 357		*num_branches = cmd->num_branches;
 358
 359	return status;
 360}
 361
 362/**
 363 * ice_aq_add_sched_elems - adds scheduling element
 364 * @hw: pointer to the hw struct
 365 * @grps_req: the number of groups that are requested to be added
 366 * @buf: pointer to buffer
 367 * @buf_size: buffer size in bytes
 368 * @grps_added: returns total number of groups added
 369 * @cd: pointer to command details structure or NULL
 370 *
 371 * Add scheduling elements (0x0401)
 372 */
 373static enum ice_status
 374ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
 375		       struct ice_aqc_add_elem *buf, u16 buf_size,
 376		       u16 *grps_added, struct ice_sq_cd *cd)
 377{
 378	struct ice_aqc_add_move_delete_elem *cmd;
 379	struct ice_aq_desc desc;
 380	enum ice_status status;
 381
 382	cmd = &desc.params.add_move_delete_elem;
 383	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
 384	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 385
 386	cmd->num_grps_req = cpu_to_le16(grps_req);
 387	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 388	if (!status && grps_added)
 389		*grps_added = le16_to_cpu(cmd->num_grps_updated);
 390
 391	return status;
 392}
 393
 394/**
 395 * ice_suspend_resume_elems - suspend/resume scheduler elements
 396 * @hw: pointer to the hw struct
 397 * @elems_req: number of elements to suspend
 398 * @buf: pointer to buffer
 399 * @buf_size: buffer size in bytes
 400 * @elems_ret: returns total number of elements suspended
 401 * @cd: pointer to command details structure or NULL
 402 * @cmd_code: command code for suspend or resume
 403 *
 404 * suspend/resume scheduler elements
 405 */
 406static enum ice_status
 407ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
 408			 struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
 409			 u16 *elems_ret, struct ice_sq_cd *cd,
 410			 enum ice_adminq_opc cmd_code)
 411{
 412	struct ice_aqc_get_cfg_elem *cmd;
 413	struct ice_aq_desc desc;
 414	enum ice_status status;
 415
 416	cmd = &desc.params.get_update_elem;
 417	ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
 418	cmd->num_elem_req = cpu_to_le16(elems_req);
 419	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 420	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 421	if (!status && elems_ret)
 422		*elems_ret = le16_to_cpu(cmd->num_elem_resp);
 423	return status;
 424}
 425
 426/**
 427 * ice_aq_suspend_sched_elems - suspend scheduler elements
 428 * @hw: pointer to the hw struct
 429 * @elems_req: number of elements to suspend
 430 * @buf: pointer to buffer
 431 * @buf_size: buffer size in bytes
 432 * @elems_ret: returns total number of elements suspended
 433 * @cd: pointer to command details structure or NULL
 434 *
 435 * Suspend scheduling elements (0x0409)
 436 */
 437static enum ice_status
 438ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
 439			   struct ice_aqc_suspend_resume_elem *buf,
 440			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
 441{
 442	return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
 443					cd, ice_aqc_opc_suspend_sched_elems);
 444}
 445
 446/**
 447 * ice_aq_resume_sched_elems - resume scheduler elements
 448 * @hw: pointer to the hw struct
 449 * @elems_req: number of elements to resume
 450 * @buf: pointer to buffer
 451 * @buf_size: buffer size in bytes
 452 * @elems_ret: returns total number of elements resumed
 453 * @cd: pointer to command details structure or NULL
 454 *
 455 * resume scheduling elements (0x040A)
 456 */
 457static enum ice_status
 458ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
 459			  struct ice_aqc_suspend_resume_elem *buf,
 460			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
 461{
 462	return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
 463					cd, ice_aqc_opc_resume_sched_elems);
 464}
 465
 466/**
 467 * ice_aq_query_sched_res - query scheduler resource
 468 * @hw: pointer to the hw struct
 469 * @buf_size: buffer size in bytes
 470 * @buf: pointer to buffer
 471 * @cd: pointer to command details structure or NULL
 472 *
 473 * Query scheduler resource allocation (0x0412)
 474 */
 475static enum ice_status
 476ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
 477		       struct ice_aqc_query_txsched_res_resp *buf,
 478		       struct ice_sq_cd *cd)
 479{
 480	struct ice_aq_desc desc;
 481
 482	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
 483	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 484}
 485
 486/**
 487 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
 488 * @hw: pointer to the hw struct
 489 * @num_nodes: number of nodes
 490 * @node_teids: array of node teids to be suspended or resumed
 491 * @suspend: true means suspend / false means resume
 492 *
 493 * This function suspends or resumes hw nodes
 494 */
 495static enum ice_status
 496ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
 497			       bool suspend)
 498{
 499	struct ice_aqc_suspend_resume_elem *buf;
 500	u16 i, buf_size, num_elem_ret = 0;
 501	enum ice_status status;
 502
 503	buf_size = sizeof(*buf) * num_nodes;
 504	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 505	if (!buf)
 506		return ICE_ERR_NO_MEMORY;
 507
 508	for (i = 0; i < num_nodes; i++)
 509		buf->teid[i] = cpu_to_le32(node_teids[i]);
 510
 511	if (suspend)
 512		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
 513						    buf_size, &num_elem_ret,
 514						    NULL);
 515	else
 516		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
 517						   buf_size, &num_elem_ret,
 518						   NULL);
 519	if (status || num_elem_ret != num_nodes)
 520		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
 521
 522	devm_kfree(ice_hw_to_dev(hw), buf);
 523	return status;
 524}
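/* Illustrative usage sketch: suspending one VSI node by TEID, mirroring
 * what ice_sched_cfg_vsi() does later in this file ('vsi_node' is assumed
 * to be a valid node):
 *
 *	u32 teid = le32_to_cpu(vsi_node->info.node_teid);
 *
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
 *	if (!status)
 *		vsi_node->in_use = false;
 */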
 525
 526/**
  527 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 528 * @pi: port information structure
 529 *
 530 * This function removes all the nodes from HW as well as from SW DB.
 531 */
 532static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
 533{
 534	struct ice_sched_agg_info *agg_info;
 535	struct ice_sched_vsi_info *vsi_elem;
 536	struct ice_sched_agg_info *atmp;
 537	struct ice_sched_vsi_info *tmp;
 538	struct ice_hw *hw;
 539
 540	if (!pi)
 541		return;
 542
 543	hw = pi->hw;
 544
 545	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
 546		struct ice_sched_agg_vsi_info *agg_vsi_info;
 547		struct ice_sched_agg_vsi_info *vtmp;
 548
 549		list_for_each_entry_safe(agg_vsi_info, vtmp,
 550					 &agg_info->agg_vsi_list, list_entry) {
 551			list_del(&agg_vsi_info->list_entry);
 552			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
 553		}
 554	}
 555
 556	/* remove the vsi list */
 557	list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
 558				 list_entry) {
 559		list_del(&vsi_elem->list_entry);
 560		devm_kfree(ice_hw_to_dev(hw), vsi_elem);
 561	}
 562
 563	if (pi->root) {
 564		ice_free_sched_node(pi, pi->root);
 565		pi->root = NULL;
 566	}
 567}
 568
 569/**
 570 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 571 * @pi: port information structure
 572 *
 573 * Cleanup scheduling elements from SW DB
 574 */
 575static void ice_sched_clear_port(struct ice_port_info *pi)
 576{
 577	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 578		return;
 579
 580	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
 581	mutex_lock(&pi->sched_lock);
 582	ice_sched_clear_tx_topo(pi);
 583	mutex_unlock(&pi->sched_lock);
 584	mutex_destroy(&pi->sched_lock);
 585}
 586
 587/**
 588 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 589 * @hw: pointer to the hw struct
 590 *
 591 * Cleanup scheduling elements from SW DB for all the ports
 592 */
 593void ice_sched_cleanup_all(struct ice_hw *hw)
 594{
 595	if (!hw || !hw->port_info)
 596		return;
 597
 598	if (hw->layer_info)
 599		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
 600
 601	ice_sched_clear_port(hw->port_info);
 602
 603	hw->num_tx_sched_layers = 0;
 604	hw->num_tx_sched_phys_layers = 0;
 605	hw->flattened_layers = 0;
 606	hw->max_cgds = 0;
 607}
 608
 609/**
 610 * ice_sched_create_vsi_info_entry - create an empty new VSI entry
 611 * @pi: port information structure
 612 * @vsi_id: VSI Id
 613 *
  614 * This function creates a new VSI entry and adds it to the list
 615 */
 616static struct ice_sched_vsi_info *
 617ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
 618{
 619	struct ice_sched_vsi_info *vsi_elem;
 620
 621	if (!pi)
 622		return NULL;
 623
 624	vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
 625				GFP_KERNEL);
 626	if (!vsi_elem)
 627		return NULL;
 628
 629	list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
 630	vsi_elem->vsi_id = vsi_id;
 631	return vsi_elem;
 632}
 633
 634/**
 635 * ice_sched_add_elems - add nodes to hw and SW DB
 636 * @pi: port information structure
 637 * @tc_node: pointer to the branch node
 638 * @parent: pointer to the parent node
 639 * @layer: layer number to add nodes
 640 * @num_nodes: number of nodes
 641 * @num_nodes_added: pointer to num nodes added
 642 * @first_node_teid: if new nodes are added then return the teid of first node
 643 *
  644 * This function adds nodes to hw as well as to SW DB for a given layer
 645 */
 646static enum ice_status
 647ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 648		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
 649		    u16 *num_nodes_added, u32 *first_node_teid)
 650{
 651	struct ice_sched_node *prev, *new_node;
 652	struct ice_aqc_add_elem *buf;
 653	u16 i, num_groups_added = 0;
 654	enum ice_status status = 0;
 655	struct ice_hw *hw = pi->hw;
 656	u16 buf_size;
 657	u32 teid;
 658
 659	buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
 660	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 661	if (!buf)
 662		return ICE_ERR_NO_MEMORY;
 663
 664	buf->hdr.parent_teid = parent->info.node_teid;
 665	buf->hdr.num_elems = cpu_to_le16(num_nodes);
 666	for (i = 0; i < num_nodes; i++) {
 667		buf->generic[i].parent_teid = parent->info.node_teid;
 668		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
 669		buf->generic[i].data.valid_sections =
 670			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
 671			ICE_AQC_ELEM_VALID_EIR;
 672		buf->generic[i].data.generic = 0;
 673		buf->generic[i].data.cir_bw.bw_profile_idx =
 674			ICE_SCHED_DFLT_RL_PROF_ID;
 675		buf->generic[i].data.eir_bw.bw_profile_idx =
 676			ICE_SCHED_DFLT_RL_PROF_ID;
 677	}
 678
 679	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
 680					&num_groups_added, NULL);
 681	if (status || num_groups_added != 1) {
 682		ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
 683		devm_kfree(ice_hw_to_dev(hw), buf);
 684		return ICE_ERR_CFG;
 685	}
 686
 687	*num_nodes_added = num_nodes;
 688	/* add nodes to the SW DB */
 689	for (i = 0; i < num_nodes; i++) {
 690		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
 691		if (status) {
 692			ice_debug(hw, ICE_DBG_SCHED,
 693				  "add nodes in SW DB failed status =%d\n",
 694				  status);
 695			break;
 696		}
 697
 698		teid = le32_to_cpu(buf->generic[i].node_teid);
 699		new_node = ice_sched_find_node_by_teid(parent, teid);
 700
 701		if (!new_node) {
 702			ice_debug(hw, ICE_DBG_SCHED,
 703				  "Node is missing for teid =%d\n", teid);
 704			break;
 705		}
 706
 707		new_node->sibling = NULL;
 708		new_node->tc_num = tc_node->tc_num;
 709
 710		/* add it to previous node sibling pointer */
 711		/* Note: siblings are not linked across branches */
 712		prev = ice_sched_get_first_node(hw, tc_node, layer);
 713
 714		if (prev && prev != new_node) {
 715			while (prev->sibling)
 716				prev = prev->sibling;
 717			prev->sibling = new_node;
 718		}
 719
 720		if (i == 0)
 721			*first_node_teid = teid;
 722	}
 723
 724	devm_kfree(ice_hw_to_dev(hw), buf);
 725	return status;
 726}
 727
 728/**
 729 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 730 * @pi: port information structure
 731 * @tc_node: pointer to TC node
 732 * @parent: pointer to parent node
 733 * @layer: layer number to add nodes
 734 * @num_nodes: number of nodes to be added
 735 * @first_node_teid: pointer to the first node teid
 736 * @num_nodes_added: pointer to number of nodes added
 737 *
  738 * This function adds nodes to a given layer.
 739 */
 740static enum ice_status
 741ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
 742			     struct ice_sched_node *tc_node,
 743			     struct ice_sched_node *parent, u8 layer,
 744			     u16 num_nodes, u32 *first_node_teid,
 745			     u16 *num_nodes_added)
 746{
 747	u32 *first_teid_ptr = first_node_teid;
 748	u16 new_num_nodes, max_child_nodes;
 749	enum ice_status status = 0;
 750	struct ice_hw *hw = pi->hw;
 751	u16 num_added = 0;
 752	u32 temp;
 753
 754	*num_nodes_added = 0;
 755
 756	if (!num_nodes)
 757		return status;
 758
 759	if (!parent || layer < hw->sw_entry_point_layer)
 760		return ICE_ERR_PARAM;
 761
 762	/* max children per node per layer */
 763	max_child_nodes =
 764	    le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
 765
 766	/* current number of children + required nodes exceed max children ? */
 767	if ((parent->num_children + num_nodes) > max_child_nodes) {
 768		/* Fail if the parent is a TC node */
 769		if (parent == tc_node)
 770			return ICE_ERR_CFG;
 771
 772		/* utilize all the spaces if the parent is not full */
 773		if (parent->num_children < max_child_nodes) {
 774			new_num_nodes = max_child_nodes - parent->num_children;
  775			/* this recursion is intentional, and won't
  776			 * go more than 2 calls deep
  777			 */
 778			status = ice_sched_add_nodes_to_layer(pi, tc_node,
 779							      parent, layer,
 780							      new_num_nodes,
 781							      first_node_teid,
 782							      &num_added);
 783			if (status)
 784				return status;
 785
 786			*num_nodes_added += num_added;
 787		}
 788		/* Don't modify the first node teid memory if the first node was
 789		 * added already in the above call. Instead send some temp
 790		 * memory for all other recursive calls.
 791		 */
 792		if (num_added)
 793			first_teid_ptr = &temp;
 794
 795		new_num_nodes = num_nodes - num_added;
 796
 797		/* This parent is full, try the next sibling */
 798		parent = parent->sibling;
 799
  800		/* this recursion is intentional: for 1024 queues
  801		 * per VSI, it takes at most 16 iterations.
  802		 * 1024 / 8 = 128 layer 8 nodes
  803		 * 128 / 8 = 16 (add 8 nodes per iteration)
  804		 */
 805		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
 806						      layer, new_num_nodes,
 807						      first_teid_ptr,
 808						      &num_added);
 809		*num_nodes_added += num_added;
 810		return status;
 811	}
 812
 813	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
 814				     num_nodes_added, first_node_teid);
 815	return status;
 816}
 817
 818/**
 819 * ice_sched_get_qgrp_layer - get the current queue group layer number
 820 * @hw: pointer to the hw struct
 821 *
 822 * This function returns the current queue group layer number
 823 */
 824static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
 825{
 826	/* It's always total layers - 1, the array is 0 relative so -2 */
 827	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
 828}
 829
 830/**
 831 * ice_sched_get_vsi_layer - get the current VSI layer number
 832 * @hw: pointer to the hw struct
 833 *
 834 * This function returns the current VSI layer number
 835 */
 836static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
 837{
 838	/* Num Layers       VSI layer
 839	 *     9               6
 840	 *     7               4
 841	 *     5 or less       sw_entry_point_layer
 842	 */
 843	/* calculate the vsi layer based on number of layers. */
 844	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
 845		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
 846
 847		if (layer > hw->sw_entry_point_layer)
 848			return layer;
 849	}
 850	return hw->sw_entry_point_layer;
 851}
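/* Worked example for the two layer helpers above: with a 9-layer tree,
 * the comments imply ICE_QGRP_LAYER_OFFSET = 2 and ICE_VSI_LAYER_OFFSET
 * = 3, so the queue-group layer is 9 - 2 = 7 and the VSI layer is
 * 9 - 3 = 6, matching the "9 -> 6" row of the table above.
 */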
 852
 853/**
 854 * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
 855 * @pi: pointer to the port info struct
 856 * @layer: layer number
 857 *
 858 * This function calculates the number of nodes present in the scheduler tree
 859 * including all the branches for a given layer
 860 */
 861static u16
 862ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
 863{
 864	struct ice_hw *hw;
 865	u16 num_nodes = 0;
 866	u8 i;
 867
 868	if (!pi)
 869		return num_nodes;
 870
 871	hw = pi->hw;
 872
 873	/* Calculate the number of nodes for all TCs */
 874	for (i = 0; i < pi->root->num_children; i++) {
 875		struct ice_sched_node *tc_node, *node;
 876
 877		tc_node = pi->root->children[i];
 878
 879		/* Get the first node */
 880		node = ice_sched_get_first_node(hw, tc_node, layer);
 881		if (!node)
 882			continue;
 883
 884		/* count the siblings */
 885		while (node) {
 886			num_nodes++;
 887			node = node->sibling;
 888		}
 889	}
 890
 891	return num_nodes;
 892}
 893
 894/**
  895 * ice_sched_validate_for_max_nodes - check whether max number of nodes is reached
 896 * @pi: port information structure
 897 * @new_num_nodes_per_layer: pointer to the new number of nodes array
 898 *
 899 * This function checks whether the scheduler tree layers have enough space to
 900 * add new nodes
 901 */
 902static enum ice_status
 903ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
 904				 u16 *new_num_nodes_per_layer)
 905{
 906	struct ice_hw *hw = pi->hw;
 907	u8 i, qg_layer;
 908	u16 num_nodes;
 909
 910	qg_layer = ice_sched_get_qgrp_layer(hw);
 911
 912	/* walk through all the layers from SW entry point to qgroup layer */
 913	for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
 914		num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
 915		if (num_nodes + new_num_nodes_per_layer[i] >
 916		    le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
 917			ice_debug(hw, ICE_DBG_SCHED,
 918				  "max nodes reached for layer = %d\n", i);
 919			return ICE_ERR_CFG;
 920		}
 921	}
 922	return 0;
 923}
 924
 925/**
 926 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 927 * @pi: port information structure
 928 *
 929 * This function removes the leaf node that was created by the FW
 930 * during initialization
 931 */
 932static void
 933ice_rm_dflt_leaf_node(struct ice_port_info *pi)
 934{
 935	struct ice_sched_node *node;
 936
 937	node = pi->root;
 938	while (node) {
 939		if (!node->num_children)
 940			break;
 941		node = node->children[0];
 942	}
 943	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
 944		u32 teid = le32_to_cpu(node->info.node_teid);
 945		enum ice_status status;
 946
 947		/* remove the default leaf node */
 948		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
 949		if (!status)
 950			ice_free_sched_node(pi, node);
 951	}
 952}
 953
 954/**
 955 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 956 * @pi: port information structure
 957 *
 958 * This function frees all the nodes except root and TC that were created by
 959 * the FW during initialization
 960 */
 961static void
 962ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
 963{
 964	struct ice_sched_node *node;
 965
 966	ice_rm_dflt_leaf_node(pi);
 967
 968	/* remove the default nodes except TC and root nodes */
 969	node = pi->root;
 970	while (node) {
 971		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
 972		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
 973		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
 974			ice_free_sched_node(pi, node);
 975			break;
 976		}
 977
 978		if (!node->num_children)
 979			break;
 980		node = node->children[0];
 981	}
 982}
 983
 984/**
 985 * ice_sched_init_port - Initialize scheduler by querying information from FW
  986 * @pi: port information structure
 987 *
  988 * This function is the initial call to find the total number of Tx scheduler
  989 * resources and the default topology created by firmware, and to store that
  990 * information in the SW DB.
 991 */
 992enum ice_status ice_sched_init_port(struct ice_port_info *pi)
 993{
 994	struct ice_aqc_get_topo_elem *buf;
 995	enum ice_status status;
 996	struct ice_hw *hw;
 997	u8 num_branches;
 998	u16 num_elems;
 999	u8 i, j;
1000
1001	if (!pi)
1002		return ICE_ERR_PARAM;
1003	hw = pi->hw;
1004
1005	/* Query the Default Topology from FW */
1006	buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
1007			   sizeof(*buf), GFP_KERNEL);
1008	if (!buf)
1009		return ICE_ERR_NO_MEMORY;
1010
1011	/* Query default scheduling tree topology */
1012	status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
1013				      sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
1014				      &num_branches, NULL);
1015	if (status)
1016		goto err_init_port;
1017
1018	/* num_branches should be between 1-8 */
1019	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1020		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1021			  num_branches);
1022		status = ICE_ERR_PARAM;
1023		goto err_init_port;
1024	}
1025
1026	/* get the number of elements on the default/first branch */
1027	num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1028
1029	/* num_elems should always be between 1-9 */
1030	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1031		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1032			  num_elems);
1033		status = ICE_ERR_PARAM;
1034		goto err_init_port;
1035	}
1036
1037	/* If the last node is a leaf node then the index of the Q group
1038	 * layer is two less than the number of elements.
1039	 */
1040	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1041	    ICE_AQC_ELEM_TYPE_LEAF)
1042		pi->last_node_teid =
1043			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1044	else
1045		pi->last_node_teid =
1046			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1047
1048	/* Insert the Tx Sched root node */
1049	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1050	if (status)
1051		goto err_init_port;
1052
1053	/* Parse the default tree and cache the information */
1054	for (i = 0; i < num_branches; i++) {
1055		num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1056
1057		/* Skip root element as already inserted */
1058		for (j = 1; j < num_elems; j++) {
1059			/* update the sw entry point */
1060			if (buf[0].generic[j].data.elem_type ==
1061			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1062				hw->sw_entry_point_layer = j;
1063
1064			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1065			if (status)
1066				goto err_init_port;
1067		}
1068	}
1069
1070	/* Remove the default nodes. */
1071	if (pi->root)
1072		ice_sched_rm_dflt_nodes(pi);
1073
1074	/* initialize the port for handling the scheduler tree */
1075	pi->port_state = ICE_SCHED_PORT_STATE_READY;
1076	mutex_init(&pi->sched_lock);
1077	INIT_LIST_HEAD(&pi->agg_list);
1078	INIT_LIST_HEAD(&pi->vsi_info_list);
1079
1080err_init_port:
1081	if (status && pi->root) {
1082		ice_free_sched_node(pi, pi->root);
1083		pi->root = NULL;
1084	}
1085
1086	devm_kfree(ice_hw_to_dev(hw), buf);
1087	return status;
1088}
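/* Illustrative bring-up sketch ('hw' assumed already initialized): the
 * resource query likely runs first, since ice_sched_add_node() reads
 * hw->layer_info, which ice_sched_query_res_alloc() below populates:
 *
 *	status = ice_sched_query_res_alloc(hw);
 *	if (!status)
 *		status = ice_sched_init_port(hw->port_info);
 */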
1089
1090/**
1091 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1092 * @hw: pointer to the HW struct
1093 *
1094 * query FW for allocated scheduler resources and store in HW struct
1095 */
1096enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1097{
1098	struct ice_aqc_query_txsched_res_resp *buf;
1099	enum ice_status status = 0;
1100
1101	if (hw->layer_info)
1102		return status;
1103
1104	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1105	if (!buf)
1106		return ICE_ERR_NO_MEMORY;
1107
1108	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1109	if (status)
1110		goto sched_query_out;
1111
1112	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1113	hw->num_tx_sched_phys_layers =
1114		le16_to_cpu(buf->sched_props.phys_levels);
1115	hw->flattened_layers = buf->sched_props.flattening_bitmap;
1116	hw->max_cgds = buf->sched_props.max_pf_cgds;
1117
1118	 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1119				       (hw->num_tx_sched_layers *
1120					sizeof(*hw->layer_info)),
1121				       GFP_KERNEL);
1122	if (!hw->layer_info) {
1123		status = ICE_ERR_NO_MEMORY;
1124		goto sched_query_out;
1125	}
1126
1127sched_query_out:
1128	devm_kfree(ice_hw_to_dev(hw), buf);
1129	return status;
1130}
1131
1132/**
 1133 * ice_sched_get_vsi_info_entry - Get the VSI list entry for a given vsi_id
 1134 * @pi: port information structure
 1135 * @vsi_id: vsi id
 1136 *
 1137 * This function retrieves the VSI list entry for the given VSI ID
1138 */
1139static struct ice_sched_vsi_info *
1140ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
1141{
1142	struct ice_sched_vsi_info *list_elem;
1143
1144	if (!pi)
1145		return NULL;
1146
1147	list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
1148		if (list_elem->vsi_id == vsi_id)
1149			return list_elem;
1150	return NULL;
1151}
1152
1153/**
1154 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1155 * @hw: pointer to the hw struct
1156 * @base: pointer to the base node
1157 * @node: pointer to the node to search
1158 *
1159 * This function checks whether a given node is part of the base node
1160 * subtree or not
1161 */
1162static bool
1163ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1164			       struct ice_sched_node *node)
1165{
1166	u8 i;
1167
1168	for (i = 0; i < base->num_children; i++) {
1169		struct ice_sched_node *child = base->children[i];
1170
1171		if (node == child)
1172			return true;
1173
1174		if (child->tx_sched_layer > node->tx_sched_layer)
1175			return false;
1176
 1177		/* this recursion is intentional, and won't
 1178		 * go more than 8 calls deep
 1179		 */
1180		if (ice_sched_find_node_in_subtree(hw, child, node))
1181			return true;
1182	}
1183	return false;
1184}
1185
1186/**
1187 * ice_sched_get_free_qparent - Get a free lan or rdma q group node
1188 * @pi: port information structure
1189 * @vsi_id: vsi id
1190 * @tc: branch number
1191 * @owner: lan or rdma
1192 *
1193 * This function retrieves a free lan or rdma q group node
1194 */
1195struct ice_sched_node *
1196ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
1197			   u8 owner)
1198{
1199	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
1200	struct ice_sched_vsi_info *list_elem;
1201	u16 max_children;
1202	u8 qgrp_layer;
1203
1204	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1205	max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
1206
1207	list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
1208	if (!list_elem)
1209		goto lan_q_exit;
1210
1211	vsi_node = list_elem->vsi_node[tc];
1212
 1213	/* bail out if the VSI ID is invalid (no VSI node) */
1214	if (!vsi_node)
1215		goto lan_q_exit;
1216
1217	/* get the first q group node from VSI sub-tree */
1218	qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
1219	while (qgrp_node) {
1220		/* make sure the qgroup node is part of the VSI subtree */
1221		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1222			if (qgrp_node->num_children < max_children &&
1223			    qgrp_node->owner == owner)
1224				break;
1225		qgrp_node = qgrp_node->sibling;
1226	}
1227
1228lan_q_exit:
1229	return qgrp_node;
1230}
1231
1232/**
1233 * ice_sched_get_vsi_node - Get a VSI node based on VSI id
1234 * @hw: pointer to the hw struct
1235 * @tc_node: pointer to the TC node
1236 * @vsi_id: VSI id
1237 *
1238 * This function retrieves a VSI node for a given VSI id from a given
1239 * TC branch
1240 */
1241static struct ice_sched_node *
1242ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
1243		       u16 vsi_id)
1244{
1245	struct ice_sched_node *node;
1246	u8 vsi_layer;
1247
1248	vsi_layer = ice_sched_get_vsi_layer(hw);
1249	node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
1250
1251	/* Check whether it already exists */
1252	while (node) {
1253		if (node->vsi_id == vsi_id)
1254			return node;
1255		node = node->sibling;
1256	}
1257
1258	return node;
1259}
1260
1261/**
1262 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1263 * @hw: pointer to the hw struct
1264 * @num_qs: number of queues
1265 * @num_nodes: num nodes array
1266 *
1267 * This function calculates the number of VSI child nodes based on the
1268 * number of queues.
1269 */
1270static void
1271ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1272{
1273	u16 num = num_qs;
1274	u8 i, qgl, vsil;
1275
1276	qgl = ice_sched_get_qgrp_layer(hw);
1277	vsil = ice_sched_get_vsi_layer(hw);
1278
1279	/* calculate num nodes from q group to VSI layer */
1280	for (i = qgl; i > vsil; i--) {
1281		u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
1282
1283		/* round to the next integer if there is a remainder */
1284		num = DIV_ROUND_UP(num, max_children);
1285
1286		/* need at least one node */
1287		num_nodes[i] = num ? num : 1;
1288	}
1289}
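/* Worked example for the loop above, assuming max_children = 8 on every
 * layer: for num_qs = 130, the queue-group layer needs
 * DIV_ROUND_UP(130, 8) = 17 nodes, the layer above it
 * DIV_ROUND_UP(17, 8) = 3, and so on up to (but not including) the
 * VSI layer.
 */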
1290
1291/**
1292 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1293 * @pi: port information structure
1294 * @vsi_id: VSI id
1295 * @tc_node: pointer to the TC node
1296 * @num_nodes: pointer to the num nodes that needs to be added per layer
1297 * @owner: node owner (lan or rdma)
1298 *
 1299 * This function adds the VSI child nodes to the tree. It gets called for
1300 * lan and rdma separately.
1301 */
1302static enum ice_status
1303ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
1304			      struct ice_sched_node *tc_node, u16 *num_nodes,
1305			      u8 owner)
1306{
1307	struct ice_sched_node *parent, *node;
1308	struct ice_hw *hw = pi->hw;
1309	enum ice_status status;
1310	u32 first_node_teid;
1311	u16 num_added = 0;
1312	u8 i, qgl, vsil;
1313
1314	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
1315	if (status)
1316		return status;
1317
1318	qgl = ice_sched_get_qgrp_layer(hw);
1319	vsil = ice_sched_get_vsi_layer(hw);
1320	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
1321	for (i = vsil + 1; i <= qgl; i++) {
1322		if (!parent)
1323			return ICE_ERR_CFG;
1324		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1325						      num_nodes[i],
1326						      &first_node_teid,
1327						      &num_added);
1328		if (status || num_nodes[i] != num_added)
1329			return ICE_ERR_CFG;
1330
1331		/* The newly added node can be a new parent for the next
1332		 * layer nodes
1333		 */
1334		if (num_added) {
1335			parent = ice_sched_find_node_by_teid(tc_node,
1336							     first_node_teid);
1337			node = parent;
1338			while (node) {
1339				node->owner = owner;
1340				node = node->sibling;
1341			}
1342		} else {
1343			parent = parent->children[0];
1344		}
1345	}
1346
1347	return 0;
1348}
1349
1350/**
1351 * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
1352 * @pi: port information structure
1353 * @vsi_node: pointer to the VSI node
1354 * @num_nodes: pointer to the num nodes that needs to be removed per layer
1355 * @owner: node owner (lan or rdma)
1356 *
1357 * This function removes the VSI child nodes from the tree. It gets called for
1358 * lan and rdma separately.
1359 */
1360static void
1361ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
1362			     struct ice_sched_node *vsi_node, u16 *num_nodes,
1363			     u8 owner)
1364{
1365	struct ice_sched_node *node, *next;
1366	u8 i, qgl, vsil;
1367	u16 num;
1368
1369	qgl = ice_sched_get_qgrp_layer(pi->hw);
1370	vsil = ice_sched_get_vsi_layer(pi->hw);
1371
1372	for (i = qgl; i > vsil; i--) {
1373		num = num_nodes[i];
1374		node = ice_sched_get_first_node(pi->hw, vsi_node, i);
1375		while (node && num) {
1376			next = node->sibling;
1377			if (node->owner == owner && !node->num_children) {
1378				ice_free_sched_node(pi, node);
1379				num--;
1380			}
1381			node = next;
1382		}
1383	}
1384}
1385
1386/**
1387 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1388 * @hw: pointer to the hw struct
1389 * @tc_node: pointer to TC node
1390 * @num_nodes: pointer to num nodes array
1391 *
 1392 * This function calculates the number of support nodes needed to add this
 1393 * VSI into the tx tree, including the VSI, its parent and the intermediate
 1394 * nodes in the layers below
1395 */
1396static void
1397ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
1398				 struct ice_sched_node *tc_node, u16 *num_nodes)
1399{
1400	struct ice_sched_node *node;
1401	u16 max_child;
1402	u8 i, vsil;
1403
1404	vsil = ice_sched_get_vsi_layer(hw);
1405	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
 1406		/* Add intermediate nodes if the TC has no children; we
 1407		 * always need at least one node for the VSI
 1408		 */
1409		if (!tc_node->num_children || i == vsil) {
1410			num_nodes[i]++;
1411		} else {
1412			/* If intermediate nodes are reached max children
1413			 * then add a new one.
1414			 */
1415			node = ice_sched_get_first_node(hw, tc_node, i);
1416			max_child = le16_to_cpu(hw->layer_info[i].max_children);
1417
1418			/* scan all the siblings */
1419			while (node) {
1420				if (node->num_children < max_child)
1421					break;
1422				node = node->sibling;
1423			}
1424
1425			/* all the nodes are full, allocate a new one */
1426			if (!node)
1427				num_nodes[i]++;
1428		}
1429}
1430
1431/**
1432 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
1433 * @pi: port information structure
1434 * @vsi_id: VSI Id
1435 * @tc_node: pointer to TC node
1436 * @num_nodes: pointer to num nodes array
1437 *
 1438 * This function adds the VSI support nodes into the tx tree, including the
 1439 * VSI, its parent and the intermediate nodes in the layers below
1440 */
1441static enum ice_status
1442ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
1443				struct ice_sched_node *tc_node, u16 *num_nodes)
1444{
1445	struct ice_sched_node *parent = tc_node;
1446	enum ice_status status;
1447	u32 first_node_teid;
1448	u16 num_added = 0;
1449	u8 i, vsil;
1450
1451	if (!pi)
1452		return ICE_ERR_PARAM;
1453
1454	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
1455	if (status)
1456		return status;
1457
1458	vsil = ice_sched_get_vsi_layer(pi->hw);
1459	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1460		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1461						      i, num_nodes[i],
1462						      &first_node_teid,
1463						      &num_added);
1464		if (status || num_nodes[i] != num_added)
1465			return ICE_ERR_CFG;
1466
1467		/* The newly added node can be a new parent for the next
1468		 * layer nodes
1469		 */
1470		if (num_added)
1471			parent = ice_sched_find_node_by_teid(tc_node,
1472							     first_node_teid);
1473		else
1474			parent = parent->children[0];
1475
1476		if (!parent)
1477			return ICE_ERR_CFG;
1478
1479		if (i == vsil)
1480			parent->vsi_id = vsi_id;
1481	}
1482	return 0;
1483}
1484
1485/**
1486 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1487 * @pi: port information structure
1488 * @vsi_id: VSI Id
1489 * @tc: TC number
1490 *
 1491 * This function adds a new VSI into the scheduler tree
1492 */
1493static enum ice_status
1494ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
1495{
1496	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1497	struct ice_sched_node *tc_node;
1498	struct ice_hw *hw = pi->hw;
1499
1500	tc_node = ice_sched_get_tc_node(pi, tc);
1501	if (!tc_node)
1502		return ICE_ERR_PARAM;
1503
1504	/* calculate number of supported nodes needed for this VSI */
1505	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
1506
1507	/* add vsi supported nodes to tc subtree */
1508	return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
1509}
1510
1511/**
1512 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1513 * @pi: port information structure
1514 * @vsi_id: VSI Id
1515 * @tc: TC number
1516 * @new_numqs: new number of max queues
1517 * @owner: owner of this subtree
1518 *
1519 * This function updates the VSI child nodes based on the number of queues
1520 */
1521static enum ice_status
1522ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
1523				 u16 new_numqs, u8 owner)
1524{
1525	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1526	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1527	struct ice_sched_node *vsi_node;
1528	struct ice_sched_node *tc_node;
1529	struct ice_sched_vsi_info *vsi;
1530	enum ice_status status = 0;
1531	struct ice_hw *hw = pi->hw;
1532	u16 prev_numqs;
1533	u8 i;
1534
1535	tc_node = ice_sched_get_tc_node(pi, tc);
1536	if (!tc_node)
1537		return ICE_ERR_CFG;
1538
1539	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
1540	if (!vsi_node)
1541		return ICE_ERR_CFG;
1542
1543	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
1544	if (!vsi)
1545		return ICE_ERR_CFG;
1546
1547	if (owner == ICE_SCHED_NODE_OWNER_LAN)
1548		prev_numqs = vsi->max_lanq[tc];
1549	else
1550		return ICE_ERR_PARAM;
1551
1552	/* num queues are not changed */
1553	if (prev_numqs == new_numqs)
1554		return status;
1555
1556	/* calculate number of nodes based on prev/new number of qs */
1557	if (prev_numqs)
1558		ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
1559
1560	if (new_numqs)
1561		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1562
1563	if (prev_numqs > new_numqs) {
1564		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1565			new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
1566
1567		ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
1568					     owner);
1569	} else {
1570		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1571			new_num_nodes[i] -= prev_num_nodes[i];
1572
1573		status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
1574						       new_num_nodes, owner);
1575		if (status)
1576			return status;
1577	}
1578
1579	if (owner == ICE_SCHED_NODE_OWNER_LAN)
1580		vsi->max_lanq[tc] = new_numqs;
1581
1582	return status;
1583}
1584
1585/**
 1586 * ice_sched_cfg_vsi - configure the new/existing VSI
1587 * @pi: port information structure
1588 * @vsi_id: VSI Id
1589 * @tc: TC number
1590 * @maxqs: max number of queues
1591 * @owner: lan or rdma
1592 * @enable: TC enabled or disabled
1593 *
1594 * This function adds/updates VSI nodes based on the number of queues. If TC is
1595 * enabled and VSI is in suspended state then resume the VSI back. If TC is
1596 * disabled then suspend the VSI if it is not already.
1597 */
1598enum ice_status
1599ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
1600		  u8 owner, bool enable)
1601{
1602	struct ice_sched_node *vsi_node, *tc_node;
1603	struct ice_sched_vsi_info *vsi;
1604	enum ice_status status = 0;
1605	struct ice_hw *hw = pi->hw;
1606
1607	tc_node = ice_sched_get_tc_node(pi, tc);
1608	if (!tc_node)
1609		return ICE_ERR_PARAM;
1610
1611	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
1612	if (!vsi)
1613		vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
1614	if (!vsi)
1615		return ICE_ERR_NO_MEMORY;
1616
1617	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
1618
1619	/* suspend the VSI if tc is not enabled */
1620	if (!enable) {
1621		if (vsi_node && vsi_node->in_use) {
1622			u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1623
1624			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1625								true);
1626			if (!status)
1627				vsi_node->in_use = false;
1628		}
1629		return status;
1630	}
1631
1632	/* TC is enabled, if it is a new VSI then add it to the tree */
1633	if (!vsi_node) {
1634		status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
1635		if (status)
1636			return status;
1637		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
1638		if (!vsi_node)
1639			return ICE_ERR_CFG;
1640		vsi->vsi_node[tc] = vsi_node;
1641		vsi_node->in_use = true;
1642	}
1643
1644	/* update the VSI child nodes */
1645	status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
1646	if (status)
1647		return status;
1648
1649	/* TC is enabled, resume the VSI if it is in the suspend state */
1650	if (!vsi_node->in_use) {
1651		u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1652
1653		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1654		if (!status)
1655			vsi_node->in_use = true;
1656	}
1657
1658	return status;
1659}
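/* Illustrative usage sketch ('pi', 'vsi_id', 'tc' and 'maxqs' assumed to
 * come from the caller): enable a TC for a LAN-owned VSI; calling again
 * with enable = false suspends it:
 *
 *	status = ice_sched_cfg_vsi(pi, vsi_id, tc, maxqs,
 *				   ICE_SCHED_NODE_OWNER_LAN, true);
 */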
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_sched.h"
   5
   6/**
   7 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
   8 * @pi: port information structure
   9 * @info: Scheduler element information from firmware
  10 *
  11 * This function inserts the root node of the scheduling tree topology
  12 * to the SW DB.
  13 */
  14static enum ice_status
  15ice_sched_add_root_node(struct ice_port_info *pi,
  16			struct ice_aqc_txsched_elem_data *info)
  17{
  18	struct ice_sched_node *root;
  19	struct ice_hw *hw;
  20
  21	if (!pi)
  22		return ICE_ERR_PARAM;
  23
  24	hw = pi->hw;
  25
  26	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
  27	if (!root)
  28		return ICE_ERR_NO_MEMORY;
  29
  30	/* coverity[suspicious_sizeof] */
  31	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
  32				      sizeof(*root), GFP_KERNEL);
  33	if (!root->children) {
  34		devm_kfree(ice_hw_to_dev(hw), root);
  35		return ICE_ERR_NO_MEMORY;
  36	}
  37
  38	memcpy(&root->info, info, sizeof(*info));
  39	pi->root = root;
  40	return 0;
  41}
  42
  43/**
  44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
  45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
  46 * @teid: node TEID to search
  47 *
  48 * This function searches for a node matching the TEID in the scheduling tree
  49 * from the SW DB. The search is recursive and is restricted by the number of
   50 * layers it has searched through, stopping at the max supported layer.
  51 *
  52 * This function needs to be called when holding the port_info->sched_lock
  53 */
  54struct ice_sched_node *
  55ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
  56{
  57	u16 i;
  58
  59	/* The TEID is same as that of the start_node */
  60	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
  61		return start_node;
  62
  63	/* The node has no children or is at the max layer */
  64	if (!start_node->num_children ||
  65	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
  66	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
  67		return NULL;
  68
  69	/* Check if TEID matches to any of the children nodes */
  70	for (i = 0; i < start_node->num_children; i++)
  71		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
  72			return start_node->children[i];
  73
  74	/* Search within each child's sub-tree */
  75	for (i = 0; i < start_node->num_children; i++) {
  76		struct ice_sched_node *tmp;
  77
  78		tmp = ice_sched_find_node_by_teid(start_node->children[i],
  79						  teid);
  80		if (tmp)
  81			return tmp;
  82	}
  83
  84	return NULL;
  85}
  86
  87/**
  88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
  89 * @hw: pointer to the HW struct
  90 * @cmd_opc: cmd opcode
  91 * @elems_req: number of elements to request
  92 * @buf: pointer to buffer
  93 * @buf_size: buffer size in bytes
   94 * @elems_resp: returns total number of elements in response
  95 * @cd: pointer to command details structure or NULL
  96 *
  97 * This function sends a scheduling elements cmd (cmd_opc)
  98 */
  99static enum ice_status
 100ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
 101			    u16 elems_req, void *buf, u16 buf_size,
 102			    u16 *elems_resp, struct ice_sq_cd *cd)
 103{
 104	struct ice_aqc_sched_elem_cmd *cmd;
 105	struct ice_aq_desc desc;
 106	enum ice_status status;
 107
 108	cmd = &desc.params.sched_elem_cmd;
 109	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
 110	cmd->num_elem_req = cpu_to_le16(elems_req);
 111	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 112	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 113	if (!status && elems_resp)
 114		*elems_resp = le16_to_cpu(cmd->num_elem_resp);
 115
 116	return status;
 117}
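/* In this version, the separate add/delete/suspend/resume senders from the
 * v4.17 listing above collapse into this one helper; each wrapper simply
 * forwards its opcode, as ice_aq_query_sched_elems() below and
 * ice_aq_delete_sched_elems() further down both do.
 */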
 118
 119/**
 120 * ice_aq_query_sched_elems - query scheduler elements
 121 * @hw: pointer to the HW struct
 122 * @elems_req: number of elements to query
 123 * @buf: pointer to buffer
 124 * @buf_size: buffer size in bytes
 125 * @elems_ret: returns total number of elements returned
 126 * @cd: pointer to command details structure or NULL
 127 *
 128 * Query scheduling elements (0x0404)
 129 */
 130enum ice_status
 131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
 132			 struct ice_aqc_get_elem *buf, u16 buf_size,
 133			 u16 *elems_ret, struct ice_sq_cd *cd)
 134{
 135	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
 136					   elems_req, (void *)buf, buf_size,
 137					   elems_ret, cd);
 138}
 139
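/* Usage sketch (illustrative): querying a single element amounts to placing
 * its TEID in the first entry of the response buffer and requesting one
 * element, which appears to be what the ice_sched_query_elem() helper used
 * further below does. node_teid is a hypothetical TEID in CPU order.
 *
 *	struct ice_aqc_get_elem buf = { 0 };
 *	u16 num_elem_ret = 0;
 *
 *	buf.generic[0].node_teid = cpu_to_le32(node_teid);
 *	status = ice_aq_query_sched_elems(hw, 1, &buf, sizeof(buf),
 *					  &num_elem_ret, NULL);
 */
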
 140/**
 141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 142 * @pi: port information structure
 143 * @layer: Scheduler layer of the node
 144 * @info: Scheduler element information from firmware
 145 *
 146 * This function inserts a scheduler node to the SW DB.
 147 */
 148enum ice_status
 149ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 150		   struct ice_aqc_txsched_elem_data *info)
 151{
 152	struct ice_sched_node *parent;
 153	struct ice_aqc_get_elem elem;
 154	struct ice_sched_node *node;
 155	enum ice_status status;
 156	struct ice_hw *hw;
 157
 158	if (!pi)
 159		return ICE_ERR_PARAM;
 160
 161	hw = pi->hw;
 162
 163	/* A valid parent node should be there */
 164	parent = ice_sched_find_node_by_teid(pi->root,
 165					     le32_to_cpu(info->parent_teid));
 166	if (!parent) {
 167		ice_debug(hw, ICE_DBG_SCHED,
 168			  "Parent Node not found for parent_teid=0x%x\n",
 169			  le32_to_cpu(info->parent_teid));
 170		return ICE_ERR_PARAM;
 171	}
 172
 173	/* query the current node information from FW before adding it
 174	 * to the SW DB
 175	 */
 176	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
 177	if (status)
 178		return status;
 179
 180	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
 181	if (!node)
 182		return ICE_ERR_NO_MEMORY;
 183	if (hw->max_children[layer]) {
 184		/* coverity[suspicious_sizeof] */
 185		node->children = devm_kcalloc(ice_hw_to_dev(hw),
 186					      hw->max_children[layer],
 187					      sizeof(*node), GFP_KERNEL);
 188		if (!node->children) {
 189			devm_kfree(ice_hw_to_dev(hw), node);
 190			return ICE_ERR_NO_MEMORY;
 191		}
 192	}
 193
 194	node->in_use = true;
 195	node->parent = parent;
 196	node->tx_sched_layer = layer;
 197	parent->children[parent->num_children++] = node;
 198	memcpy(&node->info, &elem.generic[0], sizeof(node->info));
 199	return 0;
 200}
 201
 202/**
 203 * ice_aq_delete_sched_elems - delete scheduler elements
 204 * @hw: pointer to the HW struct
 205 * @grps_req: number of groups to delete
 206 * @buf: pointer to buffer
 207 * @buf_size: buffer size in bytes
 208 * @grps_del: returns total number of elements deleted
 209 * @cd: pointer to command details structure or NULL
 210 *
 211 * Delete scheduling elements (0x040F)
 212 */
 213static enum ice_status
 214ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
 215			  struct ice_aqc_delete_elem *buf, u16 buf_size,
 216			  u16 *grps_del, struct ice_sq_cd *cd)
 217{
 218	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
 219					   grps_req, (void *)buf, buf_size,
 220					   grps_del, cd);
 221}
 222
 223/**
 224 * ice_sched_remove_elems - remove nodes from HW
 225 * @hw: pointer to the HW struct
 226 * @parent: pointer to the parent node
 227 * @num_nodes: number of nodes
 228 * @node_teids: array of node TEIDs to be deleted
 229 *
 230 * This function removes nodes from HW
 231 */
 232static enum ice_status
 233ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
 234		       u16 num_nodes, u32 *node_teids)
 235{
 236	struct ice_aqc_delete_elem *buf;
 237	u16 i, num_groups_removed = 0;
 238	enum ice_status status;
 239	u16 buf_size;
 240
 241	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
 242	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 243	if (!buf)
 244		return ICE_ERR_NO_MEMORY;
 245
 246	buf->hdr.parent_teid = parent->info.node_teid;
 247	buf->hdr.num_elems = cpu_to_le16(num_nodes);
 248	for (i = 0; i < num_nodes; i++)
 249		buf->teid[i] = cpu_to_le32(node_teids[i]);
 250
 251	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
 252					   &num_groups_removed, NULL);
 253	if (status || num_groups_removed != 1)
 254		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
 255			  hw->adminq.sq_last_status);
 256
 257	devm_kfree(ice_hw_to_dev(hw), buf);
 258	return status;
 259}
 260
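/* Worked example (illustrative): struct ice_aqc_delete_elem already carries
 * one TEID in its trailing teid[] array, so the sizing above accounts for
 * the header plus (num_nodes - 1) extra entries. For num_nodes == 4 this
 * works out to:
 *
 *	buf_size = sizeof(*buf) + sizeof(u32) * 3;
 */
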
 261/**
 262 * ice_sched_get_first_node - get the first node of the given layer
 263 * @pi: port information structure
 264 * @parent: pointer to the base node of the subtree
 265 * @layer: layer number
 266 *
 267 * This function retrieves the first node of the given layer from the subtree
 268 */
 269static struct ice_sched_node *
 270ice_sched_get_first_node(struct ice_port_info *pi,
 271			 struct ice_sched_node *parent, u8 layer)
 272{
 273	return pi->sib_head[parent->tc_num][layer];
 274}
 275
 276/**
 277 * ice_sched_get_tc_node - get pointer to TC node
 278 * @pi: port information structure
 279 * @tc: TC number
 280 *
 281 * This function returns the TC node pointer
 282 */
 283struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
 284{
 285	u8 i;
 286
 287	if (!pi || !pi->root)
 288		return NULL;
 289	for (i = 0; i < pi->root->num_children; i++)
 290		if (pi->root->children[i]->tc_num == tc)
 291			return pi->root->children[i];
 292	return NULL;
 293}
 294
 295/**
 296 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 297 * @pi: port information structure
 298 * @node: pointer to the ice_sched_node struct
 299 *
 300 * This function frees up a node from SW DB as well as from HW
 301 *
 302 * This function needs to be called with the port_info->sched_lock held
 303 */
 304void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
 305{
 306	struct ice_sched_node *parent;
 307	struct ice_hw *hw = pi->hw;
 308	u8 i, j;
 309
 310	/* Free the children before freeing up the parent node.
 311	 * The parent array is updated below and that shifts the nodes
 312	 * in the array. So always pick the first child if num children > 0
 313	 */
 314	while (node->num_children)
 315		ice_free_sched_node(pi, node->children[0]);
 316
 317	/* Leaf, TC and root nodes can't be deleted by SW */
 318	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
 319	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
 320	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
 321	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
 322		u32 teid = le32_to_cpu(node->info.node_teid);
 323
 324		ice_sched_remove_elems(hw, node->parent, 1, &teid);
 325	}
 326	parent = node->parent;
 327	/* root has no parent */
 328	if (parent) {
 329		struct ice_sched_node *p;
 330
 331		/* update the parent */
 332		for (i = 0; i < parent->num_children; i++)
 333			if (parent->children[i] == node) {
 334				for (j = i + 1; j < parent->num_children; j++)
 335					parent->children[j - 1] =
 336						parent->children[j];
 337				parent->num_children--;
 338				break;
 339			}
 340
 341		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
 342		while (p) {
 343			if (p->sibling == node) {
 344				p->sibling = node->sibling;
 345				break;
 346			}
 347			p = p->sibling;
 348		}
 349
 350		/* update the sibling head if head is getting removed */
 351		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
 352			pi->sib_head[node->tc_num][node->tx_sched_layer] =
 353				node->sibling;
 354	}
 355
 356	/* leaf nodes have no children */
 357	if (node->children)
 358		devm_kfree(ice_hw_to_dev(hw), node->children);
 359	devm_kfree(ice_hw_to_dev(hw), node);
 360}
 361
 362/**
 363 * ice_aq_get_dflt_topo - gets default scheduler topology
 364 * @hw: pointer to the HW struct
 365 * @lport: logical port number
 366 * @buf: pointer to buffer
 367 * @buf_size: buffer size in bytes
 368 * @num_branches: returns total number of queue-to-port branches
 369 * @cd: pointer to command details structure or NULL
 370 *
 371 * Get default scheduler topology (0x0400)
 372 */
 373static enum ice_status
 374ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
 375		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
 376		     u8 *num_branches, struct ice_sq_cd *cd)
 377{
 378	struct ice_aqc_get_topo *cmd;
 379	struct ice_aq_desc desc;
 380	enum ice_status status;
 381
 382	cmd = &desc.params.get_topo;
 383	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
 384	cmd->port_num = lport;
 385	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 386	if (!status && num_branches)
 387		*num_branches = cmd->num_branches;
 388
 389	return status;
 390}
 391
 392/**
 393 * ice_aq_add_sched_elems - adds scheduling elements
 394 * @hw: pointer to the HW struct
 395 * @grps_req: the number of groups that are requested to be added
 396 * @buf: pointer to buffer
 397 * @buf_size: buffer size in bytes
 398 * @grps_added: returns total number of groups added
 399 * @cd: pointer to command details structure or NULL
 400 *
 401 * Add scheduling elements (0x0401)
 402 */
 403static enum ice_status
 404ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
 405		       struct ice_aqc_add_elem *buf, u16 buf_size,
 406		       u16 *grps_added, struct ice_sq_cd *cd)
 407{
 408	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
 409					   grps_req, (void *)buf, buf_size,
 410					   grps_added, cd);
 411}
 412
 413/**
 414 * ice_aq_suspend_sched_elems - suspend scheduler elements
 415 * @hw: pointer to the HW struct
 416 * @elems_req: number of elements to suspend
 417 * @buf: pointer to buffer
 418 * @buf_size: buffer size in bytes
 419 * @elems_ret: returns total number of elements suspended
 420 * @cd: pointer to command details structure or NULL
 421 *
 422 * Suspend scheduling elements (0x0409)
 423 */
 424static enum ice_status
 425ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
 426			   struct ice_aqc_suspend_resume_elem *buf,
 427			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
 428{
 429	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
 430					   elems_req, (void *)buf, buf_size,
 431					   elems_ret, cd);
 432}
 433
 434/**
 435 * ice_aq_resume_sched_elems - resume scheduler elements
 436 * @hw: pointer to the HW struct
 437 * @elems_req: number of elements to resume
 438 * @buf: pointer to buffer
 439 * @buf_size: buffer size in bytes
 440 * @elems_ret: returns total number of elements resumed
 441 * @cd: pointer to command details structure or NULL
 442 *
 443 * Resume scheduling elements (0x040A)
 444 */
 445static enum ice_status
 446ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
 447			  struct ice_aqc_suspend_resume_elem *buf,
 448			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
 449{
 450	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
 451					   elems_req, (void *)buf, buf_size,
 452					   elems_ret, cd);
 453}
 454
 455/**
 456 * ice_aq_query_sched_res - query scheduler resource
 457 * @hw: pointer to the HW struct
 458 * @buf_size: buffer size in bytes
 459 * @buf: pointer to buffer
 460 * @cd: pointer to command details structure or NULL
 461 *
 462 * Query scheduler resource allocation (0x0412)
 463 */
 464static enum ice_status
 465ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
 466		       struct ice_aqc_query_txsched_res_resp *buf,
 467		       struct ice_sq_cd *cd)
 468{
 469	struct ice_aq_desc desc;
 470
 471	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
 472	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 473}
 474
 475/**
 476 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 477 * @hw: pointer to the HW struct
 478 * @num_nodes: number of nodes
 479 * @node_teids: array of node TEIDs to be suspended or resumed
 480 * @suspend: true means suspend / false means resume
 481 *
 482 * This function suspends or resumes HW nodes
 483 */
 484static enum ice_status
 485ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
 486			       bool suspend)
 487{
 488	struct ice_aqc_suspend_resume_elem *buf;
 489	u16 i, buf_size, num_elem_ret = 0;
 490	enum ice_status status;
 491
 492	buf_size = sizeof(*buf) * num_nodes;
 493	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 494	if (!buf)
 495		return ICE_ERR_NO_MEMORY;
 496
 497	for (i = 0; i < num_nodes; i++)
 498		buf->teid[i] = cpu_to_le32(node_teids[i]);
 499
 500	if (suspend)
 501		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
 502						    buf_size, &num_elem_ret,
 503						    NULL);
 504	else
 505		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
 506						   buf_size, &num_elem_ret,
 507						   NULL);
 508	if (status || num_elem_ret != num_nodes)
 509		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
 510
 511	devm_kfree(ice_hw_to_dev(hw), buf);
 512	return status;
 513}
 514
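/* Usage sketch (illustrative): suspending a single node before it is
 * reconfigured, as done for VSI nodes later in this file; teid is the
 * node's TEID converted to CPU order.
 *
 *	u32 teid = le32_to_cpu(vsi_node->info.node_teid);
 *
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
 */
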
 515/**
 516 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 517 * @hw: pointer to the HW struct
 518 * @vsi_handle: VSI handle
 519 * @tc: TC number
 520 * @new_numqs: number of queues
 521 */
 522static enum ice_status
 523ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 524{
 525	struct ice_vsi_ctx *vsi_ctx;
 526	struct ice_q_ctx *q_ctx;
 527
 528	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
 529	if (!vsi_ctx)
 530		return ICE_ERR_PARAM;
 531	/* allocate LAN queue contexts */
 532	if (!vsi_ctx->lan_q_ctx[tc]) {
 533		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
 534						      new_numqs,
 535						      sizeof(*q_ctx),
 536						      GFP_KERNEL);
 537		if (!vsi_ctx->lan_q_ctx[tc])
 538			return ICE_ERR_NO_MEMORY;
 539		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
 540		return 0;
 541	}
 542	/* number of queues increased, update the queue contexts */
 543	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
 544		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
 545
 546		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
 547				     sizeof(*q_ctx), GFP_KERNEL);
 548		if (!q_ctx)
 549			return ICE_ERR_NO_MEMORY;
 550		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
 551		       prev_num * sizeof(*q_ctx));
 552		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
 553		vsi_ctx->lan_q_ctx[tc] = q_ctx;
 554		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
 555	}
 556	return 0;
 557}
 558
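/* Worked example (illustrative): if a TC is first configured with 16 LAN
 * queues and later with 64, the first call allocates 16 contexts and the
 * second allocates 64, copies the original 16 forward and frees the old
 * array. A subsequent request for fewer queues returns 0 without touching
 * the array, since the function never shrinks it.
 */
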
 559/**
 560 * ice_sched_clear_agg - clears the aggregator related information
 561 * @hw: pointer to the hardware structure
 562 *
 563 * This function removes the aggregator list and frees up the
 564 * aggregator-related memory previously allocated.
 565 */
 566void ice_sched_clear_agg(struct ice_hw *hw)
 567{
 568	struct ice_sched_agg_info *agg_info;
 569	struct ice_sched_agg_info *atmp;
 570
 571	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
 572		struct ice_sched_agg_vsi_info *agg_vsi_info;
 573		struct ice_sched_agg_vsi_info *vtmp;
 574
 575		list_for_each_entry_safe(agg_vsi_info, vtmp,
 576					 &agg_info->agg_vsi_list, list_entry) {
 577			list_del(&agg_vsi_info->list_entry);
 578			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
 579		}
 580		list_del(&agg_info->list_entry);
 581		devm_kfree(ice_hw_to_dev(hw), agg_info);
 582	}
 583}
 584
 585/**
 586 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 587 * @pi: port information structure
 588 *
 589 * This function removes all the nodes from HW as well as from SW DB.
 590 */
 591static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
 592{
 593	if (!pi)
 594		return;
 595	if (pi->root) {
 596		ice_free_sched_node(pi, pi->root);
 597		pi->root = NULL;
 598	}
 599}
 600
 601/**
 602 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 603 * @pi: port information structure
 604 *
 605 * Cleanup scheduling elements from SW DB
 606 */
 607void ice_sched_clear_port(struct ice_port_info *pi)
 608{
 609	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 610		return;
 611
 612	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
 613	mutex_lock(&pi->sched_lock);
 614	ice_sched_clear_tx_topo(pi);
 615	mutex_unlock(&pi->sched_lock);
 616	mutex_destroy(&pi->sched_lock);
 617}
 618
 619/**
 620 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 621 * @hw: pointer to the HW struct
 622 *
 623 * Cleanup scheduling elements from SW DB for all the ports
 624 */
 625void ice_sched_cleanup_all(struct ice_hw *hw)
 626{
 627	if (!hw)
 628		return;
 629
 630	if (hw->layer_info) {
 631		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
 632		hw->layer_info = NULL;
 633	}
 634
 635	if (hw->port_info)
 636		ice_sched_clear_port(hw->port_info);
 637
 638	hw->num_tx_sched_layers = 0;
 639	hw->num_tx_sched_phys_layers = 0;
 640	hw->flattened_layers = 0;
 641	hw->max_cgds = 0;
 642}
 643
 644/**
 645 * ice_sched_add_elems - add nodes to HW and SW DB
 646 * @pi: port information structure
 647 * @tc_node: pointer to the branch node
 648 * @parent: pointer to the parent node
 649 * @layer: layer number to add nodes
 650 * @num_nodes: number of nodes
 651 * @num_nodes_added: pointer to num nodes added
 652 * @first_node_teid: if new nodes are added then return the TEID of first node
 653 *
 654 * This function adds nodes to HW as well as to SW DB for a given layer
 655 */
 656static enum ice_status
 657ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 658		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
 659		    u16 *num_nodes_added, u32 *first_node_teid)
 660{
 661	struct ice_sched_node *prev, *new_node;
 662	struct ice_aqc_add_elem *buf;
 663	u16 i, num_groups_added = 0;
 664	enum ice_status status = 0;
 665	struct ice_hw *hw = pi->hw;
 666	size_t buf_size;
 667	u32 teid;
 668
 669	buf_size = struct_size(buf, generic, num_nodes - 1);
 670	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 671	if (!buf)
 672		return ICE_ERR_NO_MEMORY;
 673
 674	buf->hdr.parent_teid = parent->info.node_teid;
 675	buf->hdr.num_elems = cpu_to_le16(num_nodes);
 676	for (i = 0; i < num_nodes; i++) {
 677		buf->generic[i].parent_teid = parent->info.node_teid;
 678		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
 679		buf->generic[i].data.valid_sections =
 680			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
 681			ICE_AQC_ELEM_VALID_EIR;
 682		buf->generic[i].data.generic = 0;
 683		buf->generic[i].data.cir_bw.bw_profile_idx =
 684			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
 685		buf->generic[i].data.cir_bw.bw_alloc =
 686			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
 687		buf->generic[i].data.eir_bw.bw_profile_idx =
 688			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
 689		buf->generic[i].data.eir_bw.bw_alloc =
 690			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
 691	}
 692
 693	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
 694					&num_groups_added, NULL);
 695	if (status || num_groups_added != 1) {
 696		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
 697			  hw->adminq.sq_last_status);
 698		devm_kfree(ice_hw_to_dev(hw), buf);
 699		return ICE_ERR_CFG;
 700	}
 701
 702	*num_nodes_added = num_nodes;
 703	/* add nodes to the SW DB */
 704	for (i = 0; i < num_nodes; i++) {
 705		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
 706		if (status) {
 707			ice_debug(hw, ICE_DBG_SCHED,
 708				  "add nodes in SW DB failed status =%d\n",
 709				  status);
 710			break;
 711		}
 712
 713		teid = le32_to_cpu(buf->generic[i].node_teid);
 714		new_node = ice_sched_find_node_by_teid(parent, teid);
 715		if (!new_node) {
 716			ice_debug(hw, ICE_DBG_SCHED,
 717				  "Node is missing for teid =%d\n", teid);
 718			break;
 719		}
 720
 721		new_node->sibling = NULL;
 722		new_node->tc_num = tc_node->tc_num;
 723
 724		/* add it to previous node sibling pointer */
 725		/* Note: siblings are not linked across branches */
 726		prev = ice_sched_get_first_node(pi, tc_node, layer);
 727		if (prev && prev != new_node) {
 728			while (prev->sibling)
 729				prev = prev->sibling;
 730			prev->sibling = new_node;
 731		}
 732
 733		/* initialize the sibling head */
 734		if (!pi->sib_head[tc_node->tc_num][layer])
 735			pi->sib_head[tc_node->tc_num][layer] = new_node;
 736
 737		if (i == 0)
 738			*first_node_teid = teid;
 739	}
 740
 741	devm_kfree(ice_hw_to_dev(hw), buf);
 742	return status;
 743}
 744
 745/**
 746 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 747 * @pi: port information structure
 748 * @tc_node: pointer to TC node
 749 * @parent: pointer to parent node
 750 * @layer: layer number to add nodes
 751 * @num_nodes: number of nodes to be added
 752 * @first_node_teid: pointer to the first node TEID
 753 * @num_nodes_added: pointer to number of nodes added
 754 *
 755 * This function adds nodes to a given layer.
 756 */
 757static enum ice_status
 758ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
 759			     struct ice_sched_node *tc_node,
 760			     struct ice_sched_node *parent, u8 layer,
 761			     u16 num_nodes, u32 *first_node_teid,
 762			     u16 *num_nodes_added)
 763{
 764	u32 *first_teid_ptr = first_node_teid;
 765	u16 new_num_nodes, max_child_nodes;
 766	enum ice_status status = 0;
 767	struct ice_hw *hw = pi->hw;
 768	u16 num_added = 0;
 769	u32 temp;
 770
 771	*num_nodes_added = 0;
 772
 773	if (!num_nodes)
 774		return status;
 775
 776	if (!parent || layer < hw->sw_entry_point_layer)
 777		return ICE_ERR_PARAM;
 778
 779	/* max children per node per layer */
 780	max_child_nodes = hw->max_children[parent->tx_sched_layer];
 781
 782	/* current number of children + required nodes exceed max children? */
 783	if ((parent->num_children + num_nodes) > max_child_nodes) {
 784		/* Fail if the parent is a TC node */
 785		if (parent == tc_node)
 786			return ICE_ERR_CFG;
 787
 788		/* utilize all the remaining space if the parent is not full */
 789		if (parent->num_children < max_child_nodes) {
 790			new_num_nodes = max_child_nodes - parent->num_children;
 791			/* this recursion is intentional, and won't
 792			 * go more than 2 calls deep
 793			 */
 794			status = ice_sched_add_nodes_to_layer(pi, tc_node,
 795							      parent, layer,
 796							      new_num_nodes,
 797							      first_node_teid,
 798							      &num_added);
 799			if (status)
 800				return status;
 801
 802			*num_nodes_added += num_added;
 803		}
 804		/* Don't modify the first node TEID memory if the first node was
 805		 * added already in the above call. Instead send some temp
 806		 * memory for all other recursive calls.
 807		 */
 808		if (num_added)
 809			first_teid_ptr = &temp;
 810
 811		new_num_nodes = num_nodes - num_added;
 812
 813		/* This parent is full, try the next sibling */
 814		parent = parent->sibling;
 815
 816		/* this recursion is intentional; for 1024 queues
 817		 * per VSI it takes at most 16 iterations:
 818		 * 1024 / 8 = 128 layer 8 nodes
 819		 * 128 / 8 = 16 (add 8 nodes per iteration)
 820		 */
 821		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
 822						      layer, new_num_nodes,
 823						      first_teid_ptr,
 824						      &num_added);
 825		*num_nodes_added += num_added;
 826		return status;
 827	}
 828
 829	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
 830				     num_nodes_added, first_node_teid);
 831	return status;
 832}
 833
 834/**
 835 * ice_sched_get_qgrp_layer - get the current queue group layer number
 836 * @hw: pointer to the HW struct
 837 *
 838 * This function returns the current queue group layer number
 839 */
 840static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
 841{
 842	/* It's always total layers - 1, the array is 0 relative so -2 */
 843	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
 844}
 845
 846/**
 847 * ice_sched_get_vsi_layer - get the current VSI layer number
 848 * @hw: pointer to the HW struct
 849 *
 850 * This function returns the current VSI layer number
 851 */
 852static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
 853{
 854	/* Num Layers       VSI layer
 855	 *     9               6
 856	 *     7               4
 857	 *     5 or less       sw_entry_point_layer
 858	 */
 859	/* calculate the VSI layer based on number of layers. */
 860	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
 861		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
 862
 863		if (layer > hw->sw_entry_point_layer)
 864			return layer;
 865	}
 866	return hw->sw_entry_point_layer;
 867}
 868
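/* Worked example (illustrative): for the default 9-layer tree the helpers
 * above yield a queue group layer of 7 (9 - ICE_QGRP_LAYER_OFFSET) and a
 * VSI layer of 6, matching the table. With 5 or fewer layers the computed
 * VSI layer would not be above sw_entry_point_layer, so the entry point
 * layer itself is returned instead.
 */
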
 869/**
 870 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 871 * @pi: port information structure
 872 *
 873 * This function removes the leaf node that was created by the FW
 874 * during initialization
 875 */
 876static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
 877{
 878	struct ice_sched_node *node;
 879
 880	node = pi->root;
 881	while (node) {
 882		if (!node->num_children)
 883			break;
 884		node = node->children[0];
 885	}
 886	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
 887		u32 teid = le32_to_cpu(node->info.node_teid);
 888		enum ice_status status;
 889
 890		/* remove the default leaf node */
 891		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
 892		if (!status)
 893			ice_free_sched_node(pi, node);
 894	}
 895}
 896
 897/**
 898 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 899 * @pi: port information structure
 900 *
 901 * This function frees all the nodes except root and TC that were created by
 902 * the FW during initialization
 903 */
 904static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
 905{
 906	struct ice_sched_node *node;
 907
 908	ice_rm_dflt_leaf_node(pi);
 909
 910	/* remove the default nodes except TC and root nodes */
 911	node = pi->root;
 912	while (node) {
 913		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
 914		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
 915		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
 916			ice_free_sched_node(pi, node);
 917			break;
 918		}
 919
 920		if (!node->num_children)
 921			break;
 922		node = node->children[0];
 923	}
 924}
 925
 926/**
 927 * ice_sched_init_port - Initialize scheduler by querying information from FW
 928 * @pi: port information structure
 929 *
 930 * This function is the initial call to find the total number of Tx scheduler
 931 * resources and the default topology created by firmware, and to store that
 932 * information in the SW DB.
 933 */
 934enum ice_status ice_sched_init_port(struct ice_port_info *pi)
 935{
 936	struct ice_aqc_get_topo_elem *buf;
 937	enum ice_status status;
 938	struct ice_hw *hw;
 939	u8 num_branches;
 940	u16 num_elems;
 941	u8 i, j;
 942
 943	if (!pi)
 944		return ICE_ERR_PARAM;
 945	hw = pi->hw;
 946
 947	/* Query the Default Topology from FW */
 948	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 949	if (!buf)
 950		return ICE_ERR_NO_MEMORY;
 951
 952	/* Query default scheduling tree topology */
 953	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
 954				      &num_branches, NULL);
 955	if (status)
 956		goto err_init_port;
 957
 958	/* num_branches should be between 1-8 */
 959	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
 960		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
 961			  num_branches);
 962		status = ICE_ERR_PARAM;
 963		goto err_init_port;
 964	}
 965
 966	/* get the number of elements on the default/first branch */
 967	num_elems = le16_to_cpu(buf[0].hdr.num_elems);
 968
 969	/* num_elems should always be between 1-9 */
 970	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
 971		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
 972			  num_elems);
 973		status = ICE_ERR_PARAM;
 974		goto err_init_port;
 975	}
 976
 977	/* If the last node is a leaf node then the index of the queue group
 978	 * layer is two less than the number of elements.
 979	 */
 980	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
 981	    ICE_AQC_ELEM_TYPE_LEAF)
 982		pi->last_node_teid =
 983			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
 984	else
 985		pi->last_node_teid =
 986			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
 987
 988	/* Insert the Tx Sched root node */
 989	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
 990	if (status)
 991		goto err_init_port;
 992
 993	/* Parse the default tree and cache the information */
 994	for (i = 0; i < num_branches; i++) {
 995		num_elems = le16_to_cpu(buf[i].hdr.num_elems);
 996
 997		/* Skip root element as already inserted */
 998		for (j = 1; j < num_elems; j++) {
 999			/* update the sw entry point */
1000			if (buf[0].generic[j].data.elem_type ==
1001			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1002				hw->sw_entry_point_layer = j;
1003
1004			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1005			if (status)
1006				goto err_init_port;
1007		}
1008	}
1009
1010	/* Remove the default nodes. */
1011	if (pi->root)
1012		ice_sched_rm_dflt_nodes(pi);
1013
1014	/* initialize the port for handling the scheduler tree */
1015	pi->port_state = ICE_SCHED_PORT_STATE_READY;
1016	mutex_init(&pi->sched_lock);
1017
1018err_init_port:
1019	if (status && pi->root) {
1020		ice_free_sched_node(pi, pi->root);
1021		pi->root = NULL;
1022	}
1023
1024	devm_kfree(ice_hw_to_dev(hw), buf);
1025	return status;
1026}
1027
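/* Usage sketch (illustrative): this is expected to be the first scheduler
 * call for a port, made once the port_info is populated during HW init;
 * err_unroll is a hypothetical caller-side label.
 *
 *	status = ice_sched_init_port(hw->port_info);
 *	if (status)
 *		goto err_unroll;
 */
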
1028/**
1029 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1030 * @hw: pointer to the HW struct
1031 *
1032 * query FW for allocated scheduler resources and store in HW struct
1033 */
1034enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1035{
1036	struct ice_aqc_query_txsched_res_resp *buf;
1037	enum ice_status status = 0;
1038	__le16 max_sibl;
1039	u16 i;
1040
1041	if (hw->layer_info)
1042		return status;
1043
1044	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1045	if (!buf)
1046		return ICE_ERR_NO_MEMORY;
1047
1048	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1049	if (status)
1050		goto sched_query_out;
1051
1052	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1053	hw->num_tx_sched_phys_layers =
1054		le16_to_cpu(buf->sched_props.phys_levels);
1055	hw->flattened_layers = buf->sched_props.flattening_bitmap;
1056	hw->max_cgds = buf->sched_props.max_pf_cgds;
1057
1058	/* max sibling group size of the current layer refers to the max children
1059	 * of the node in the layer below.
1060	 * layer 1 node max children will be layer 2 max sibling group size
1061	 * layer 2 node max children will be layer 3 max sibling group size
1062	 * and so on. This array will be populated from root (index 0) to
1063	 * qgroup layer 7. Leaf node has no children.
1064	 */
1065	for (i = 0; i < hw->num_tx_sched_layers; i++) {
1066		max_sibl = buf->layer_props[i].max_sibl_grp_sz;
1067		hw->max_children[i] = le16_to_cpu(max_sibl);
1068	}
1069
1070	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1071				      (hw->num_tx_sched_layers *
1072				       sizeof(*hw->layer_info)),
1073				      GFP_KERNEL);
1074	if (!hw->layer_info) {
1075		status = ICE_ERR_NO_MEMORY;
1076		goto sched_query_out;
1077	}
1078
1079sched_query_out:
1080	devm_kfree(ice_hw_to_dev(hw), buf);
1081	return status;
1082}
1083
1084/**
1085 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1086 * @hw: pointer to the HW struct
1087 * @base: pointer to the base node
1088 * @node: pointer to the node to search
1089 *
1090 * This function checks whether a given node is part of the base node
1091 * subtree or not
1092 */
1093static bool
1094ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1095			       struct ice_sched_node *node)
1096{
1097	u8 i;
1098
1099	for (i = 0; i < base->num_children; i++) {
1100		struct ice_sched_node *child = base->children[i];
1101
1102		if (node == child)
1103			return true;
1104
1105		if (child->tx_sched_layer > node->tx_sched_layer)
1106			return false;
1107
1108		/* this recursion is intentional, and won't
1109		 * go more than 8 calls deep
1110		 */
1111		if (ice_sched_find_node_in_subtree(hw, child, node))
1112			return true;
1113	}
1114	return false;
1115}
1116
1117/**
1118 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1119 * @pi: port information structure
1120 * @vsi_handle: software VSI handle
1121 * @tc: branch number
1122 * @owner: LAN or RDMA
1123 *
1124 * This function retrieves a free LAN or RDMA queue group node
1125 */
1126struct ice_sched_node *
1127ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1128			   u8 owner)
1129{
1130	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
1131	struct ice_vsi_ctx *vsi_ctx;
1132	u16 max_children;
1133	u8 qgrp_layer;
1134
1135	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1136	max_children = pi->hw->max_children[qgrp_layer];
1137
1138	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1139	if (!vsi_ctx)
1140		return NULL;
1141	vsi_node = vsi_ctx->sched.vsi_node[tc];
1142	/* check for an invalid VSI handle */
1143	if (!vsi_node)
1144		goto lan_q_exit;
1145
1146	/* get the first queue group node from VSI sub-tree */
1147	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1148	while (qgrp_node) {
1149		/* make sure the qgroup node is part of the VSI subtree */
1150		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1151			if (qgrp_node->num_children < max_children &&
1152			    qgrp_node->owner == owner)
1153				break;
1154		qgrp_node = qgrp_node->sibling;
1155	}
1156
1157lan_q_exit:
1158	return qgrp_node;
1159}
1160
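/* Usage sketch (illustrative): finding a queue group parent before adding
 * a LAN queue on TC 0; the actual queue add is handled outside this file.
 *
 *	struct ice_sched_node *parent;
 *
 *	parent = ice_sched_get_free_qparent(pi, vsi_handle, 0,
 *					    ICE_SCHED_NODE_OWNER_LAN);
 *	if (!parent)
 *		return ICE_ERR_CFG;
 */
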
1161/**
1162 * ice_sched_get_vsi_node - Get a VSI node based on VSI handle
1163 * @hw: pointer to the HW struct
1164 * @tc_node: pointer to the TC node
1165 * @vsi_handle: software VSI handle
1166 *
1167 * This function retrieves a VSI node for a given VSI handle from a given
1168 * TC branch
1169 */
1170static struct ice_sched_node *
1171ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
1172		       u16 vsi_handle)
1173{
1174	struct ice_sched_node *node;
1175	u8 vsi_layer;
1176
1177	vsi_layer = ice_sched_get_vsi_layer(hw);
1178	node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
1179
1180	/* Check whether it already exists */
1181	while (node) {
1182		if (node->vsi_handle == vsi_handle)
1183			return node;
1184		node = node->sibling;
1185	}
1186
1187	return node;
1188}
1189
1190/**
1191 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1192 * @hw: pointer to the HW struct
1193 * @num_qs: number of queues
1194 * @num_nodes: num nodes array
1195 *
1196 * This function calculates the number of VSI child nodes based on the
1197 * number of queues.
1198 */
1199static void
1200ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1201{
1202	u16 num = num_qs;
1203	u8 i, qgl, vsil;
1204
1205	qgl = ice_sched_get_qgrp_layer(hw);
1206	vsil = ice_sched_get_vsi_layer(hw);
1207
1208	/* calculate num nodes from queue group to VSI layer */
1209	for (i = qgl; i > vsil; i--) {
1210		/* round to the next integer if there is a remainder */
1211		num = DIV_ROUND_UP(num, hw->max_children[i]);
1212
1213		/* need at least one node */
1214		num_nodes[i] = num ? num : 1;
1215	}
1216}
1217
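/* Worked example (illustrative): assume a 9-layer tree (qgl = 7, vsil = 6)
 * and a max_children of 8 at the queue group layer. For num_qs == 128 the
 * single loop iteration computes DIV_ROUND_UP(128, 8) = 16, so
 * num_nodes[7] = 16 queue group nodes are needed under the VSI node.
 */
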
1218/**
1219 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1220 * @pi: port information structure
1221 * @vsi_handle: software VSI handle
1222 * @tc_node: pointer to the TC node
1223 * @num_nodes: pointer to the num nodes that needs to be added per layer
1224 * @owner: node owner (LAN or RDMA)
1225 *
1226 * This function adds the VSI child nodes to tree. It gets called for
1227 * LAN and RDMA separately.
1228 */
1229static enum ice_status
1230ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1231			      struct ice_sched_node *tc_node, u16 *num_nodes,
1232			      u8 owner)
1233{
1234	struct ice_sched_node *parent, *node;
1235	struct ice_hw *hw = pi->hw;
1236	enum ice_status status;
1237	u32 first_node_teid;
1238	u16 num_added = 0;
1239	u8 i, qgl, vsil;
1240
1241	qgl = ice_sched_get_qgrp_layer(hw);
1242	vsil = ice_sched_get_vsi_layer(hw);
1243	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1244	for (i = vsil + 1; i <= qgl; i++) {
1245		if (!parent)
1246			return ICE_ERR_CFG;
1247
1248		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1249						      num_nodes[i],
1250						      &first_node_teid,
1251						      &num_added);
1252		if (status || num_nodes[i] != num_added)
1253			return ICE_ERR_CFG;
1254
1255		/* The newly added node can be a new parent for the next
1256		 * layer nodes
1257		 */
1258		if (num_added) {
1259			parent = ice_sched_find_node_by_teid(tc_node,
1260							     first_node_teid);
1261			node = parent;
1262			while (node) {
1263				node->owner = owner;
1264				node = node->sibling;
1265			}
1266		} else {
1267			parent = parent->children[0];
1268		}
1269	}
1270
1271	return 0;
1272}
1273
1274/**
1275 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1276 * @hw: pointer to the HW struct
1277 * @tc_node: pointer to TC node
1278 * @num_nodes: pointer to num nodes array
1279 *
1280 * This function calculates the number of supported nodes needed to add this
1281 * VSI into the Tx tree, including the VSI, parent and intermediate nodes in
1282 * the layers below
1283 */
1284static void
1285ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
1286				 struct ice_sched_node *tc_node, u16 *num_nodes)
1287{
1288	struct ice_sched_node *node;
1289	u8 vsil;
1290	int i;
1291
1292	vsil = ice_sched_get_vsi_layer(hw);
1293	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
1294		/* Add intermediate nodes if the TC has no children;
1295		 * at least one node is always needed at the VSI layer
1296		 */
1297		if (!tc_node->num_children || i == vsil) {
1298			num_nodes[i]++;
1299		} else {
1300			/* If the intermediate nodes have reached max
1301			 * children then add a new one.
1302			 */
1303			node = ice_sched_get_first_node(hw->port_info, tc_node,
1304							(u8)i);
1305			/* scan all the siblings */
1306			while (node) {
1307				if (node->num_children < hw->max_children[i])
1308					break;
1309				node = node->sibling;
1310			}
1311
1312			/* tree has one intermediate node to add this new VSI.
1313			 * So no need to calculate supported nodes for below
1314			 * layers.
1315			 */
1316			if (node)
1317				break;
1318			/* all the nodes are full, allocate a new one */
1319			num_nodes[i]++;
1320		}
1321}
1322
1323/**
1324 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1325 * @pi: port information structure
1326 * @vsi_handle: software VSI handle
1327 * @tc_node: pointer to TC node
1328 * @num_nodes: pointer to num nodes array
1329 *
1330 * This function adds the VSI supported nodes into the Tx tree, including the
1331 * VSI, its parent and intermediate nodes in the layers below
1332 */
1333static enum ice_status
1334ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1335				struct ice_sched_node *tc_node, u16 *num_nodes)
1336{
1337	struct ice_sched_node *parent = tc_node;
1338	enum ice_status status;
1339	u32 first_node_teid;
1340	u16 num_added = 0;
1341	u8 i, vsil;
1342
1343	if (!pi)
1344		return ICE_ERR_PARAM;
1345
1346	vsil = ice_sched_get_vsi_layer(pi->hw);
1347	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1348		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1349						      i, num_nodes[i],
1350						      &first_node_teid,
1351						      &num_added);
1352		if (status || num_nodes[i] != num_added)
1353			return ICE_ERR_CFG;
1354
1355		/* The newly added node can be a new parent for the next
1356		 * layer nodes
1357		 */
1358		if (num_added)
1359			parent = ice_sched_find_node_by_teid(tc_node,
1360							     first_node_teid);
1361		else
1362			parent = parent->children[0];
1363
1364		if (!parent)
1365			return ICE_ERR_CFG;
1366
1367		if (i == vsil)
1368			parent->vsi_handle = vsi_handle;
1369	}
1370
1371	return 0;
1372}
1373
1374/**
1375 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1376 * @pi: port information structure
1377 * @vsi_handle: software VSI handle
1378 * @tc: TC number
1379 *
1380 * This function adds a new VSI into scheduler tree
1381 */
1382static enum ice_status
1383ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1384{
1385	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1386	struct ice_sched_node *tc_node;
1387	struct ice_hw *hw = pi->hw;
1388
1389	tc_node = ice_sched_get_tc_node(pi, tc);
1390	if (!tc_node)
1391		return ICE_ERR_PARAM;
1392
1393	/* calculate number of supported nodes needed for this VSI */
1394	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
1395
1396	/* add VSI supported nodes to TC subtree */
1397	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1398					       num_nodes);
1399}
1400
1401/**
1402 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1403 * @pi: port information structure
1404 * @vsi_handle: software VSI handle
1405 * @tc: TC number
1406 * @new_numqs: new number of max queues
1407 * @owner: owner of this subtree
1408 *
1409 * This function updates the VSI child nodes based on the number of queues
1410 */
1411static enum ice_status
1412ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1413				 u8 tc, u16 new_numqs, u8 owner)
1414{
1415	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1416	struct ice_sched_node *vsi_node;
1417	struct ice_sched_node *tc_node;
1418	struct ice_vsi_ctx *vsi_ctx;
1419	enum ice_status status = 0;
1420	struct ice_hw *hw = pi->hw;
1421	u16 prev_numqs;
1422
1423	tc_node = ice_sched_get_tc_node(pi, tc);
1424	if (!tc_node)
1425		return ICE_ERR_CFG;
1426
1427	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1428	if (!vsi_node)
1429		return ICE_ERR_CFG;
1430
1431	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1432	if (!vsi_ctx)
1433		return ICE_ERR_PARAM;
1434
1435	prev_numqs = vsi_ctx->sched.max_lanq[tc];
1436	/* number of queues unchanged or reduced, nothing to update */
1437	if (new_numqs <= prev_numqs)
1438		return status;
1439	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1440	if (status)
1441		return status;
1442
1443	if (new_numqs)
1444		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1445	/* Keep the max number of queues configured all the time. Update the
1446	 * tree only if number of queues > previous number of queues. This may
1447	 * leave some extra nodes in the tree if number of queues < previous
1448	 * number but that wouldn't harm anything. Removing those extra nodes
1449	 * may complicate the code if those nodes are part of SRL or
1450	 * individually rate limited.
1451	 */
1452	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1453					       new_num_nodes, owner);
1454	if (status)
1455		return status;
1456	vsi_ctx->sched.max_lanq[tc] = new_numqs;
1457
1458	return 0;
1459}
1460
1461/**
1462 * ice_sched_cfg_vsi - configure the new/existing VSI
1463 * @pi: port information structure
1464 * @vsi_handle: software VSI handle
1465 * @tc: TC number
1466 * @maxqs: max number of queues
1467 * @owner: LAN or RDMA
1468 * @enable: TC enabled or disabled
1469 *
1470 * This function adds/updates VSI nodes based on the number of queues. If TC is
1471 * enabled and the VSI is in a suspended state then resume the VSI. If TC is
1472 * disabled then suspend the VSI if it is not already suspended.
1473 */
1474enum ice_status
1475ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1476		  u8 owner, bool enable)
1477{
1478	struct ice_sched_node *vsi_node, *tc_node;
1479	struct ice_vsi_ctx *vsi_ctx;
1480	enum ice_status status = 0;
1481	struct ice_hw *hw = pi->hw;
1482
1483	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1484	tc_node = ice_sched_get_tc_node(pi, tc);
1485	if (!tc_node)
1486		return ICE_ERR_PARAM;
1487	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1488	if (!vsi_ctx)
1489		return ICE_ERR_PARAM;
1490	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1491
1492	/* suspend the VSI if TC is not enabled */
1493	if (!enable) {
1494		if (vsi_node && vsi_node->in_use) {
1495			u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1496
1497			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1498								true);
1499			if (!status)
1500				vsi_node->in_use = false;
1501		}
1502		return status;
1503	}
1504
1505	/* TC is enabled, if it is a new VSI then add it to the tree */
1506	if (!vsi_node) {
1507		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1508		if (status)
1509			return status;
1510
1511		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1512		if (!vsi_node)
1513			return ICE_ERR_CFG;
1514
1515		vsi_ctx->sched.vsi_node[tc] = vsi_node;
1516		vsi_node->in_use = true;
1517		/* invalidate the max queues whenever the VSI gets added for the
1518		 * first time into the scheduler tree (boot or after reset). We
1519		 * need to recreate the child nodes all the time in these cases.
1520		 */
1521		vsi_ctx->sched.max_lanq[tc] = 0;
1522	}
1523
1524	/* update the VSI child nodes */
1525	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1526						  owner);
1527	if (status)
1528		return status;
1529
1530	/* TC is enabled, resume the VSI if it is in the suspend state */
1531	if (!vsi_node->in_use) {
1532		u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1533
1534		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1535		if (!status)
1536			vsi_node->in_use = true;
1537	}
1538
1539	return status;
1540}
1541
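/* Usage sketch (illustrative): enabling TC 0 for a VSI with up to 16 LAN
 * queues; vsi_handle is assumed to be a valid software VSI handle.
 *
 *	status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 16,
 *				   ICE_SCHED_NODE_OWNER_LAN, true);
 */
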
1542/**
1543 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
1544 * @pi: port information structure
1545 * @vsi_handle: software VSI handle
1546 *
1547 * This function removes a single aggregator VSI info entry from the
1548 * aggregator list.
1549 */
1550static void
1551ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1552{
1553	struct ice_sched_agg_info *agg_info;
1554	struct ice_sched_agg_info *atmp;
1555
1556	list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1557				 list_entry) {
1558		struct ice_sched_agg_vsi_info *agg_vsi_info;
1559		struct ice_sched_agg_vsi_info *vtmp;
1560
1561		list_for_each_entry_safe(agg_vsi_info, vtmp,
1562					 &agg_info->agg_vsi_list, list_entry)
1563			if (agg_vsi_info->vsi_handle == vsi_handle) {
1564				list_del(&agg_vsi_info->list_entry);
1565				devm_kfree(ice_hw_to_dev(pi->hw),
1566					   agg_vsi_info);
1567				return;
1568			}
1569	}
1570}
1571
1572/**
1573 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1574 * @node: pointer to the sub-tree node
1575 *
1576 * This function checks for a leaf node presence in a given sub-tree node.
1577 */
1578static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1579{
1580	u8 i;
1581
1582	for (i = 0; i < node->num_children; i++)
1583		if (ice_sched_is_leaf_node_present(node->children[i]))
1584			return true;
1585	/* check for a leaf node */
1586	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1587}
1588
1589/**
1590 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1591 * @pi: port information structure
1592 * @vsi_handle: software VSI handle
1593 * @owner: LAN or RDMA
1594 *
1595 * This function removes the VSI and its LAN or RDMA children nodes from the
1596 * scheduler tree.
1597 */
1598static enum ice_status
1599ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1600{
1601	enum ice_status status = ICE_ERR_PARAM;
1602	struct ice_vsi_ctx *vsi_ctx;
1603	u8 i;
1604
1605	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
1606	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
1607		return status;
1608	mutex_lock(&pi->sched_lock);
1609	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1610	if (!vsi_ctx)
1611		goto exit_sched_rm_vsi_cfg;
1612
1613	ice_for_each_traffic_class(i) {
1614		struct ice_sched_node *vsi_node, *tc_node;
1615		u8 j = 0;
1616
1617		tc_node = ice_sched_get_tc_node(pi, i);
1618		if (!tc_node)
1619			continue;
1620
1621		vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
1622		if (!vsi_node)
1623			continue;
1624
1625		if (ice_sched_is_leaf_node_present(vsi_node)) {
1626			ice_debug(pi->hw, ICE_DBG_SCHED,
1627				  "VSI has leaf nodes in TC %d\n", i);
1628			status = ICE_ERR_IN_USE;
1629			goto exit_sched_rm_vsi_cfg;
1630		}
1631		while (j < vsi_node->num_children) {
1632			if (vsi_node->children[j]->owner == owner) {
1633				ice_free_sched_node(pi, vsi_node->children[j]);
1634
1635				/* reset the counter again since the num
1636				 * children will be updated after node removal
1637				 */
1638				j = 0;
1639			} else {
1640				j++;
1641			}
1642		}
1643		/* remove the VSI if it has no children */
1644		if (!vsi_node->num_children) {
1645			ice_free_sched_node(pi, vsi_node);
1646			vsi_ctx->sched.vsi_node[i] = NULL;
1647
1648			/* clean up aggregator related VSI info if any */
1649			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
1650		}
1651		if (owner == ICE_SCHED_NODE_OWNER_LAN)
1652			vsi_ctx->sched.max_lanq[i] = 0;
1653	}
1654	status = 0;
1655
1656exit_sched_rm_vsi_cfg:
1657	mutex_unlock(&pi->sched_lock);
1658	return status;
1659}
1660
1661/**
1662 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
1663 * @pi: port information structure
1664 * @vsi_handle: software VSI handle
1665 *
1666 * This function clears the VSI and its LAN children nodes from scheduler tree
1667 * for all TCs.
1668 */
1669enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
1670{
1671	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
1672}