// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#define dev_fmt(fmt) "RateLimiting: " fmt

#include <asm/errno.h>
#include <asm/div64.h>

#include <linux/dev_printk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/units.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_rl_admin.h"
#include "adf_rl.h"
#include "adf_sysfs_rl.h"

#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET	0U
#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET	0U
#define RL_TOKEN_PCIE_SIZE			64
#define RL_TOKEN_ASYM_SIZE			1024
#define RL_CSR_SIZE				4U
#define RL_CAPABILITY_MASK			GENMASK(6, 4)
#define RL_CAPABILITY_VALUE			0x70
#define RL_VALIDATE_NON_ZERO(input)		((input) == 0)
#define ROOT_MASK				GENMASK(1, 0)
#define CLUSTER_MASK				GENMASK(3, 0)
#define LEAF_MASK				GENMASK(5, 0)

static int validate_user_input(struct adf_accel_dev *accel_dev,
			       struct adf_rl_sla_input_data *sla_in,
			       bool is_update)
{
	const unsigned long rp_mask = sla_in->rp_mask;
	size_t rp_mask_size;
	int i, cnt;

	if (sla_in->pir < sla_in->cir) {
		dev_notice(&GET_DEV(accel_dev),
			   "PIR must be >= CIR, setting PIR to CIR\n");
		sla_in->pir = sla_in->cir;
	}

	if (!is_update) {
		cnt = 0;
		rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE;
		for_each_set_bit(i, &rp_mask, rp_mask_size) {
			if (++cnt > RL_RP_CNT_PER_LEAF_MAX) {
				dev_notice(&GET_DEV(accel_dev),
					   "Too many ring pairs selected for this SLA\n");
				return -EINVAL;
			}
		}

		if (sla_in->srv >= ADF_SVC_NONE) {
			dev_notice(&GET_DEV(accel_dev),
				   "Wrong service type\n");
			return -EINVAL;
		}

		if (sla_in->type > RL_LEAF) {
			dev_notice(&GET_DEV(accel_dev),
				   "Wrong node type\n");
			return -EINVAL;
		}

		if (sla_in->parent_id < RL_PARENT_DEFAULT_ID ||
		    sla_in->parent_id >= RL_NODES_CNT_MAX) {
			dev_notice(&GET_DEV(accel_dev),
				   "Wrong parent ID\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)
{
	struct rl_sla *sla;

	if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) {
		dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n");
		return -EINVAL;
	}

	sla = accel_dev->rate_limiting->sla[sla_id];

	if (!sla) {
		dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n");
		return -EINVAL;
	}

	if (sla->type != RL_LEAF) {
		dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * find_parent() - Find the parent for a new SLA
 * @rl_data: pointer to ratelimiting data
 * @sla_in: pointer to user input data for a new SLA
 *
 * Function returns a pointer to the parent SLA. If the parent ID is provided
 * as input in the user data, then such ID is validated and the parent SLA
 * is returned.
 * Otherwise, it returns the default parent SLA (root or cluster) for
 * the new object.
 *
 * Return:
 * * Pointer to the parent SLA object
 * * NULL - when parent cannot be found
 */
static struct rl_sla *find_parent(struct adf_rl *rl_data,
				  struct adf_rl_sla_input_data *sla_in)
{
	int input_parent_id = sla_in->parent_id;
	struct rl_sla *root = NULL;
	struct rl_sla *parent_sla;
	int i;

	if (sla_in->type == RL_ROOT)
		return NULL;

	if (input_parent_id > RL_PARENT_DEFAULT_ID) {
		parent_sla = rl_data->sla[input_parent_id];
		/*
		 * SLA can be a parent if it has the same service as the child
		 * and its type is higher in the hierarchy,
		 * for example the parent type of a LEAF must be a CLUSTER.
		 */
		if (parent_sla && parent_sla->srv == sla_in->srv &&
		    parent_sla->type == sla_in->type - 1)
			return parent_sla;

		return NULL;
	}

	/* If input_parent_id is not valid, get root for this service type. */
	for (i = 0; i < RL_ROOT_MAX; i++) {
		if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) {
			root = rl_data->root[i];
			break;
		}
	}

	if (!root)
		return NULL;

	/*
	 * If the type of this SLA is cluster, then return the root.
	 * Otherwise, find the default (i.e. first) cluster for this service.
	 */
	if (sla_in->type == RL_CLUSTER)
		return root;

	for (i = 0; i < RL_CLUSTER_MAX; i++) {
		if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root)
			return rl_data->cluster[i];
	}

	return NULL;
}

static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
{
	switch (rl_srv) {
	case ADF_SVC_ASYM:
		return ASYM;
	case ADF_SVC_SYM:
		return SYM;
	case ADF_SVC_DC:
		return COMP;
	default:
		return UNUSED;
	}
}

/**
 * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
 * @rl_data: pointer to ratelimiting data
 * @type: SLA type
 * @sla_arr: pointer to variable where requested pointer will be stored
 *
 * Return: Max number of elements allowed for the returned array
 */
static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
			       struct rl_sla ***sla_arr)
{
	switch (type) {
	case RL_LEAF:
		*sla_arr = rl_data->leaf;
		return RL_LEAF_MAX;
	case RL_CLUSTER:
		*sla_arr = rl_data->cluster;
		return RL_CLUSTER_MAX;
	case RL_ROOT:
		*sla_arr = rl_data->root;
		return RL_ROOT_MAX;
	default:
		*sla_arr = NULL;
		return 0;
	}
}
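
/*
 * Illustrative sketch (not part of the driver): a typical iteration pattern
 * built on this helper; the same shape is used in adf_rl_remove_sla_all()
 * below. All names come from this file.
 *
 *	struct rl_sla **arr;
 *	u32 max_id = get_sla_arr_of_type(rl_data, RL_CLUSTER, &arr);
 *	u32 i;
 *
 *	for (i = 0; i < max_id; i++) {
 *		if (arr[i])
 *			;	// operate on the i-th cluster SLA
 *	}
 */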

static bool is_service_enabled(struct adf_accel_dev *accel_dev,
			       enum adf_base_services rl_srv)
{
	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u8 rps_per_bundle = hw_data->num_banks_per_vf;
	int i;

	for (i = 0; i < rps_per_bundle; i++) {
		if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
			return true;
	}

	return false;
}

/**
 * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
 * @accel_dev: pointer to acceleration device structure
 * @sla: SLA object data where result will be written
 * @rp_mask: bitmask of ring pair IDs
 *
 * Converts the provided bitmask into an array of ring pair IDs. It verifies
 * that the RPs are not already in use, that they support the SLA's service,
 * and that the number of provided IDs does not exceed the maximum. On
 * success, the IDs are written to sla->ring_pairs_ids and their count to
 * sla->ring_pairs_cnt.
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- ring pairs array cannot be created from provided mask
 */
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
			  const unsigned long rp_mask)
{
	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
	u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
	bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
	size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
	u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks;
	u16 cnt = 0;
	u16 rp_id;

	for_each_set_bit(rp_id, &rp_mask, rp_id_max) {
		if (cnt >= rp_cnt_max) {
			dev_notice(&GET_DEV(accel_dev),
				   "Assigned more ring pairs than supported");
			return -EINVAL;
		}

		if (rp_in_use[rp_id]) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP %u already assigned to other SLA", rp_id);
			return -EINVAL;
		}

		if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP %u does not support SLA service", rp_id);
			return -EINVAL;
		}

		sla->ring_pairs_ids[cnt++] = rp_id;
	}

	sla->ring_pairs_cnt = cnt;

	return 0;
}
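
/*
 * Example (hypothetical input): assuming ring pairs 0 and 2 are free and
 * match the SLA's service, a caller-supplied rp_mask of 0b101 would produce:
 *
 *	sla->ring_pairs_ids[0] = 0;
 *	sla->ring_pairs_ids[1] = 2;
 *	sla->ring_pairs_cnt = 2;
 */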

static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used)
{
	u16 rp_id;
	int i;

	for (i = 0; i < sla->ring_pairs_cnt; i++) {
		rp_id = sla->ring_pairs_ids[i];
		rp_in_use[rp_id] = used;
	}
}

static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev,
			       struct rl_sla *sla, bool clear)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.r2l_offset;
	u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK);
	u32 offset;
	int i;

	for (i = 0; i < sla->ring_pairs_cnt; i++) {
		offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]);
		ADF_CSR_WR(pmisc_addr, offset, node_id);
	}
}

static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev,
				   struct rl_sla *sla, bool clear)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.l2c_offset;
	u32 node_id = sla->node_id & LEAF_MASK;
	u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK);
	u32 offset;

	offset = base_offset + (RL_CSR_SIZE * node_id);
	ADF_CSR_WR(pmisc_addr, offset, parent_id);
}

static void assign_cluster_to_root(struct adf_accel_dev *accel_dev,
				   struct rl_sla *sla, bool clear)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.c2s_offset;
	u32 node_id = sla->node_id & CLUSTER_MASK;
	u32 parent_id = clear ? 0U : (sla->parent->node_id & ROOT_MASK);
	u32 offset;

	offset = base_offset + (RL_CSR_SIZE * node_id);
	ADF_CSR_WR(pmisc_addr, offset, parent_id);
}

static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
				  struct rl_sla *sla, bool clear_assignment)
{
	switch (sla->type) {
	case RL_LEAF:
		assign_rps_to_leaf(accel_dev, sla, clear_assignment);
		assign_leaf_to_cluster(accel_dev, sla, clear_assignment);
		break;
	case RL_CLUSTER:
		assign_cluster_to_root(accel_dev, sla, clear_assignment);
		break;
	default:
		break;
	}
}
/**
 * can_parent_afford_sla() - Verifies if the parent can afford a child SLA
 * @sla_in: pointer to user input data for a new SLA
 * @sla_parent: pointer to parent SLA object
 * @sla_cir: current child CIR value (used only for an update)
 * @is_update: true if the request updates an existing SLA
 *
 * Verifies that the parent has enough remaining budget to take on a child
 * with the provided parameters. For an update, the child's current CIR value
 * is returned to the parent's budget first.
 * The requested PIR cannot exceed the PIR assigned to the parent.
 *
 * Return:
 * * true	- SLA can be created
 * * false	- SLA cannot be created
 */
static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in,
				  struct rl_sla *sla_parent, u32 sla_cir,
				  bool is_update)
{
	u32 rem_cir = sla_parent->rem_cir;

	if (is_update)
		rem_cir += sla_cir;

	if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir)
		return false;

	return true;
}
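
/*
 * Worked example with made-up numbers: a parent with rem_cir = 300 and
 * pir = 800, and a child updated from cir = 200 to sla_in->cir = 450 with
 * sla_in->pir = 700. The update path first returns the child's old CIR, so
 * the budget check is 450 <= 300 + 200 and the rate check is 700 <= 800;
 * both hold, hence the function returns true.
 */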

/**
 * can_node_afford_update() - Verifies if SLA can be updated with input data
 * @sla_in: pointer to user input data for a new SLA
 * @sla: pointer to SLA object selected for update
 *
 * Verifies that the new CIR value is large enough to cover the CIR already
 * consumed by the assigned child SLAs, and that the PIR can be updated.
 *
 * Return:
 * * true	- SLA can be updated
 * * false	- SLA cannot be updated
 */
static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in,
				   struct rl_sla *sla)
{
	u32 cir_in_use = sla->cir - sla->rem_cir;

	/* The new CIR cannot be smaller than the currently consumed value. */
	if (cir_in_use > sla_in->cir)
		return false;

	/* The PIR of a root/cluster node with assigned children cannot be reduced. */
	if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0)
		return false;

	return true;
}

static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla,
			     struct adf_rl_sla_input_data *sla_in,
			     bool is_update)
{
	u32 max_val = rl_data->device_data->scale_ref;
	struct rl_sla *parent = sla->parent;
	bool ret = true;

	if (sla_in->cir > max_val || sla_in->pir > max_val)
		ret = false;

	switch (sla->type) {
	case RL_LEAF:
		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
					     is_update);
		break;
	case RL_CLUSTER:
		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
					     is_update);

		if (is_update)
			ret &= can_node_afford_update(sla_in, sla);

		break;
	case RL_ROOT:
		if (is_update)
			ret &= can_node_afford_update(sla_in, sla);

		break;
	default:
		ret = false;
		break;
	}

	return ret;
}

static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)
{
	switch (sla->type) {
	case RL_LEAF:
		if (is_update)
			sla->parent->rem_cir += old_cir;

		sla->parent->rem_cir -= sla->cir;
		sla->rem_cir = 0;
		break;
	case RL_CLUSTER:
		if (is_update) {
			sla->parent->rem_cir += old_cir;
			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
		} else {
			sla->rem_cir = sla->cir;
		}

		sla->parent->rem_cir -= sla->cir;
		break;
	case RL_ROOT:
		if (is_update)
			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
		else
			sla->rem_cir = sla->cir;
		break;
	default:
		break;
	}
}
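
/*
 * Budget accounting sketch (hypothetical values): a cluster with cir = 500
 * and rem_cir = 100 (400 consumed by its leaves) updated to sla->cir = 600
 * ends with rem_cir = 600 - (500 - 100) = 200, while the parent root first
 * gets the old 500 returned and then has the new 600 subtracted from its
 * own rem_cir.
 */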

/**
 * get_next_free_sla_id() - finds the next free ID in the SLA array
 * @rl_data: Pointer to ratelimiting data structure
 *
 * Return:
 * * 0 .. RL_NODES_CNT_MAX - 1	- a valid free ID
 * * -ENOSPC			- all SLA slots are in use
 */
static int get_next_free_sla_id(struct adf_rl *rl_data)
{
	int i;

	/*
	 * Scan for the first empty slot. An explicit loop avoids the
	 * off-by-one of a post-incremented while condition, which would
	 * misreport -ENOSPC when only the last slot is free.
	 */
	for (i = 0; i < RL_NODES_CNT_MAX; i++) {
		if (!rl_data->sla[i])
			return i;
	}

	return -ENOSPC;
}

/**
 * get_next_free_node_id() - finds the next free ID in the array of that node type
 * @rl_data: Pointer to ratelimiting data structure
 * @sla: Pointer to SLA object for which the ID is searched
 *
 * Return:
 * * 0 .. RL_[NODE_TYPE]_MAX - 1	- a valid free ID
 * * -ENOSPC				- all slots of that type are in use
 */
static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla)
{
	struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev);
	int max_id, i, step, rp_per_leaf;
	struct rl_sla **sla_list;

	rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf;

	/*
	 * Static nodes mapping:
	 * root0 - cluster[0,4,8,12] - leaf[0-15]
	 * root1 - cluster[1,5,9,13] - leaf[16-31]
	 * root2 - cluster[2,6,10,14] - leaf[32-47]
	 */
	switch (sla->type) {
	case RL_LEAF:
		i = sla->srv * rp_per_leaf;
		step = 1;
		max_id = i + rp_per_leaf;
		sla_list = rl_data->leaf;
		break;
	case RL_CLUSTER:
		i = sla->srv;
		step = 4;
		max_id = RL_CLUSTER_MAX;
		sla_list = rl_data->cluster;
		break;
	case RL_ROOT:
		return sla->srv;
	default:
		return -EINVAL;
	}

	while (i < max_id && sla_list[i])
		i += step;

	if (i >= max_id)
		return -ENOSPC;

	return i;
}
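
/*
 * Example walk through the static mapping above (assuming
 * ADF_SVC_SYM == 1): a new CLUSTER for the SYM service probes node IDs
 * 1, 5, 9, 13 in that order and returns the first free slot in
 * rl_data->cluster[], or -ENOSPC when all four are taken.
 */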

u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
				  enum adf_base_services svc_type)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 avail_slice_cycles, allocated_tokens;

	if (!sla_val)
		return 0;

	avail_slice_cycles = hw_data->clock_frequency;

	switch (svc_type) {
	case ADF_SVC_ASYM:
		avail_slice_cycles *= device_data->slices.pke_cnt;
		break;
	case ADF_SVC_SYM:
		avail_slice_cycles *= device_data->slices.cph_cnt;
		break;
	case ADF_SVC_DC:
		avail_slice_cycles *= device_data->slices.dcpr_cnt;
		break;
	default:
		break;
	}

	do_div(avail_slice_cycles, device_data->scan_interval);
	allocated_tokens = avail_slice_cycles * sla_val;
	do_div(allocated_tokens, device_data->scale_ref);

	return allocated_tokens;
}
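
/*
 * Hypothetical numeric walk-through (values are illustrative only): with
 * clock_frequency = 500000000, pke_cnt = 6 and scan_interval = 1000000,
 * an ADF_SVC_ASYM request yields
 * avail_slice_cycles = 500000000 * 6 / 1000000 = 3000 cycles per scan;
 * with scale_ref = 1000, sla_val = 250 then allocates
 * 3000 * 250 / 1000 = 750 slice tokens.
 */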

u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
			       enum adf_base_services svc_type)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 allocated_ae_cycles, avail_ae_cycles;

	if (!sla_val)
		return 0;

	avail_ae_cycles = hw_data->clock_frequency;
	avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
	do_div(avail_ae_cycles, device_data->scan_interval);

	sla_val *= device_data->max_tp[svc_type];
	sla_val /= device_data->scale_ref;

	allocated_ae_cycles = (sla_val * avail_ae_cycles);
	do_div(allocated_ae_cycles, device_data->max_tp[svc_type]);

	return allocated_ae_cycles;
}

u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
			    enum adf_base_services svc_type, bool is_bw_out)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	u64 sla_to_bytes, allocated_bw, sla_scaled;

	if (!sla_val)
		return 0;

	sla_to_bytes = sla_val;
	sla_to_bytes *= device_data->max_tp[svc_type];
	do_div(sla_to_bytes, device_data->scale_ref);

	sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
						     BYTES_PER_MBIT;
	if (svc_type == ADF_SVC_DC && is_bw_out)
		sla_to_bytes *= device_data->slices.dcpr_cnt -
				device_data->dcpr_correction;

	sla_scaled = sla_to_bytes * device_data->pcie_scale_mul;
	do_div(sla_scaled, device_data->pcie_scale_div);
	allocated_bw = sla_scaled;
	do_div(allocated_bw, RL_TOKEN_PCIE_SIZE);
	do_div(allocated_bw, device_data->scan_interval);

	return allocated_bw;
}
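
/*
 * Sketch of the scaling chain with made-up values: for ADF_SVC_SYM with
 * max_tp = 100000 and scale_ref = 1000, sla_val = 10 gives
 * sla_to_bytes = 10 * 100000 / 1000 = 1000, which is then multiplied by
 * BYTES_PER_MBIT, scaled by pcie_scale_mul / pcie_scale_div, and finally
 * divided by RL_TOKEN_PCIE_SIZE (64) and scan_interval to obtain PCIe
 * tokens per scan.
 */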

/**
 * add_new_sla_entry() - creates a new SLA object and fills it with user data
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data for a new SLA
 * @sla_out: Pointer to variable that will contain the address of a new
 *	     SLA object if the operation succeeds
 *
 * Return:
 * * 0		- ok
 * * -ENOMEM	- memory allocation failed
 * * -EINVAL	- invalid user input
 * * -ENOSPC	- all available SLAs are in use
 */
static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
			     struct adf_rl_sla_input_data *sla_in,
			     struct rl_sla **sla_out)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla;
	int ret = 0;

	sla = kzalloc(sizeof(*sla), GFP_KERNEL);
	if (!sla) {
		ret = -ENOMEM;
		goto ret_err;
	}
	*sla_out = sla;

	if (!is_service_enabled(accel_dev, sla_in->srv)) {
		dev_notice(&GET_DEV(accel_dev),
			   "Provided service is not enabled\n");
		ret = -EINVAL;
		goto ret_err;
	}

	sla->srv = sla_in->srv;
	sla->type = sla_in->type;
	ret = get_next_free_node_id(rl_data, sla);
	if (ret < 0) {
		dev_notice(&GET_DEV(accel_dev),
			   "Exceeded number of available nodes for that service\n");
		goto ret_err;
	}
	sla->node_id = ret;

	ret = get_next_free_sla_id(rl_data);
	if (ret < 0) {
		dev_notice(&GET_DEV(accel_dev),
			   "Allocated maximum SLAs number\n");
		goto ret_err;
	}
	sla->sla_id = ret;

	sla->parent = find_parent(rl_data, sla_in);
	if (!sla->parent && sla->type != RL_ROOT) {
		if (sla_in->parent_id != RL_PARENT_DEFAULT_ID)
			dev_notice(&GET_DEV(accel_dev),
				   "Provided parent ID does not exist or cannot be parent for this SLA.");
		else
			dev_notice(&GET_DEV(accel_dev),
				   "Unable to find parent node for this service. Is service enabled?");
		ret = -EINVAL;
		goto ret_err;
	}

	if (sla->type == RL_LEAF) {
		ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask);
		if (!sla->ring_pairs_cnt || ret) {
			dev_notice(&GET_DEV(accel_dev),
				   "Unable to find ring pairs to assign to the leaf");
			if (!ret)
				ret = -EINVAL;

			goto ret_err;
		}
	}

	return 0;

ret_err:
	kfree(sla);
	*sla_out = NULL;

	return ret;
}

static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct adf_rl_hw_data *device_data = rl_data->device_data;
	struct adf_rl_sla_input_data sla_in = { };
	int ret = 0;
	int i;

	/* Init root for each enabled service */
	sla_in.type = RL_ROOT;
	sla_in.parent_id = RL_PARENT_DEFAULT_ID;

	for (i = 0; i < ADF_SVC_NONE; i++) {
		if (!is_service_enabled(accel_dev, i))
			continue;

		sla_in.cir = device_data->scale_ref;
		sla_in.pir = sla_in.cir;
		sla_in.srv = i;

		ret = adf_rl_add_sla(accel_dev, &sla_in);
		if (ret)
			return ret;
	}

	/* Init default cluster for each root */
	sla_in.type = RL_CLUSTER;
	for (i = 0; i < ADF_SVC_NONE; i++) {
		if (!rl_data->root[i])
			continue;

		sla_in.cir = rl_data->root[i]->cir;
		sla_in.pir = sla_in.cir;
		sla_in.srv = rl_data->root[i]->srv;

		ret = adf_rl_add_sla(accel_dev, &sla_in);
		if (ret)
			return ret;
	}

	return 0;
}

static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
{
	bool *rp_in_use = rl_data->rp_in_use;
	struct rl_sla **sla_type_arr = NULL;
	int i, sla_id, node_id;
	u32 old_cir;

	sla_id = sla->sla_id;
	node_id = sla->node_id;
	old_cir = sla->cir;
	sla->cir = 0;
	sla->pir = 0;

	for (i = 0; i < sla->ring_pairs_cnt; i++)
		rp_in_use[sla->ring_pairs_ids[i]] = false;

	update_budget(sla, old_cir, true);
	get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
	assign_node_to_parent(rl_data->accel_dev, sla, true);
	adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
	mark_rps_usage(sla, rl_data->rp_in_use, false);

	kfree(sla);
	rl_data->sla[sla_id] = NULL;
	sla_type_arr[node_id] = NULL;
}

/**
 * add_update_sla() - handles the creation and the update of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data for a new/updated SLA
 * @is_update: flag to indicate if this is an update or an add operation
 *
 * Return:
 * * 0		- ok
 * * -ENOMEM	- memory allocation failed
 * * -EINVAL	- user input data cannot be used to create SLA
 * * -ENOSPC	- all available SLAs are in use
 */
static int add_update_sla(struct adf_accel_dev *accel_dev,
			  struct adf_rl_sla_input_data *sla_in, bool is_update)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla **sla_type_arr = NULL;
	struct rl_sla *sla = NULL;
	u32 old_cir = 0;
	int ret;

	if (!sla_in) {
		dev_warn(&GET_DEV(accel_dev),
			 "SLA input data pointer is missing\n");
		return -EFAULT;
	}

	mutex_lock(&rl_data->rl_lock);

	/* Input validation */
	ret = validate_user_input(accel_dev, sla_in, is_update);
	if (ret)
		goto ret_err;

	if (is_update) {
		ret = validate_sla_id(accel_dev, sla_in->sla_id);
		if (ret)
			goto ret_err;

		sla = rl_data->sla[sla_in->sla_id];
		old_cir = sla->cir;
	} else {
		ret = add_new_sla_entry(accel_dev, sla_in, &sla);
		if (ret)
			goto ret_err;
	}

	if (!is_enough_budget(rl_data, sla, sla_in, is_update)) {
		dev_notice(&GET_DEV(accel_dev),
			   "Input value exceeds the remaining budget%s\n",
			   is_update ? " or more budget is already in use" : "");
		ret = -EINVAL;
		goto ret_err;
	}
	sla->cir = sla_in->cir;
	sla->pir = sla_in->pir;

	/* Apply SLA */
	assign_node_to_parent(accel_dev, sla, false);
	ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update);
	if (ret) {
		dev_notice(&GET_DEV(accel_dev),
			   "Failed to apply an SLA\n");
		goto ret_err;
	}
	update_budget(sla, old_cir, is_update);

	if (!is_update) {
		mark_rps_usage(sla, rl_data->rp_in_use, true);
		get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
		sla_type_arr[sla->node_id] = sla;
		rl_data->sla[sla->sla_id] = sla;
	}

	sla_in->sla_id = sla->sla_id;
	goto ret_ok;

ret_err:
	if (!is_update) {
		sla_in->sla_id = -1;
		kfree(sla);
	}
ret_ok:
	mutex_unlock(&rl_data->rl_lock);
	return ret;
}

/**
 * adf_rl_add_sla() - handles the creation of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data required to add an SLA
 *
 * Return:
 * * 0		- ok
 * * -ENOMEM	- memory allocation failed
 * * -EINVAL	- invalid user input
 * * -ENOSPC	- all available SLAs are in use
 */
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	return add_update_sla(accel_dev, sla_in, false);
}
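
/*
 * Usage sketch (hypothetical caller, e.g. a sysfs store handler): create a
 * leaf SLA on ring pairs 0 and 1 with CIR = 100 and PIR = 200. On success,
 * in.sla_id holds the ID of the new SLA, which can later be passed to
 * adf_rl_update_sla(), adf_rl_get_sla() or adf_rl_remove_sla().
 *
 *	struct adf_rl_sla_input_data in = {
 *		.rp_mask = BIT(0) | BIT(1),
 *		.parent_id = RL_PARENT_DEFAULT_ID,
 *		.type = RL_LEAF,
 *		.srv = ADF_SVC_SYM,
 *		.cir = 100,
 *		.pir = 200,
 *	};
 *	int ret = adf_rl_add_sla(accel_dev, &in);
 */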

/**
 * adf_rl_update_sla() - handles the update of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data required to update an SLA
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- user input data cannot be used to update SLA
 */
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
		      struct adf_rl_sla_input_data *sla_in)
{
	return add_update_sla(accel_dev, sla_in, true);
}

/**
 * adf_rl_get_sla() - returns the data of an existing SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user data where SLA info will be stored
 *
 * The sla_id of the requested SLA must be set in the sla_in structure
 * before calling.
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- provided sla_id does not exist
 */
int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	struct rl_sla *sla;
	int ret, i;

	ret = validate_sla_id(accel_dev, sla_in->sla_id);
	if (ret)
		return ret;

	sla = accel_dev->rate_limiting->sla[sla_in->sla_id];
	sla_in->type = sla->type;
	sla_in->srv = sla->srv;
	sla_in->cir = sla->cir;
	sla_in->pir = sla->pir;
	sla_in->rp_mask = 0U;
	if (sla->parent)
		sla_in->parent_id = sla->parent->sla_id;
	else
		sla_in->parent_id = RL_PARENT_DEFAULT_ID;

	for (i = 0; i < sla->ring_pairs_cnt; i++)
		sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]);

	return 0;
}

/**
 * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for
 *				       selected service or provided sla_id
 * @accel_dev: pointer to acceleration device structure
 * @srv: service ID for which capability is requested
 * @sla_id: ID of the cluster or root to which we want to assign a new SLA
 *
 * Check if the provided SLA id is valid. If it is, and the service matches
 * the requested service and the type is cluster or root, return its remaining
 * capability.
 * If the provided ID does not match the service or type, return the remaining
 * capability of the default cluster for that service.
 *
 * Return:
 * * Positive value	- correct remaining value
 * * -EINVAL		- algorithm cannot find a remaining value for provided data
 */
int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
				    enum adf_base_services srv, int sla_id)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla = NULL;
	int i;

	if (srv >= ADF_SVC_NONE)
		return -EINVAL;

	if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
		sla = rl_data->sla[sla_id];

		if (sla->srv == srv && sla->type <= RL_CLUSTER)
			goto ret_ok;
	}

	for (i = 0; i < RL_CLUSTER_MAX; i++) {
		if (!rl_data->cluster[i])
			continue;

		if (rl_data->cluster[i]->srv == srv) {
			sla = rl_data->cluster[i];
			goto ret_ok;
		}
	}

	return -EINVAL;
ret_ok:
	return sla->rem_cir;
}

/**
 * adf_rl_remove_sla() - removes the SLA with the provided sla_id
 * @accel_dev: pointer to acceleration device structure
 * @sla_id: ID of the SLA to be removed
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- wrong sla_id or the SLA still has assigned children
 */
int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla;
	int ret = 0;

	mutex_lock(&rl_data->rl_lock);
	ret = validate_sla_id(accel_dev, sla_id);
	if (ret)
		goto err_ret;

	sla = rl_data->sla[sla_id];

	if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
		dev_notice(&GET_DEV(accel_dev),
			   "To remove a parent SLA, all its children must be removed first");
		ret = -EINVAL;
		goto err_ret;
	}

	clear_sla(rl_data, sla);

err_ret:
	mutex_unlock(&rl_data->rl_lock);
	return ret;
}

/**
 * adf_rl_remove_sla_all() - removes all SLAs from device
 * @accel_dev: pointer to acceleration device structure
 * @incl_default: set to true if default SLAs also should be removed
 */
void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	int end_type = incl_default ? RL_ROOT : RL_LEAF;
	struct rl_sla **sla_type_arr = NULL;
	u32 max_id;
	int i, j;

	mutex_lock(&rl_data->rl_lock);

	/* Unregister and remove all SLAs */
	for (j = RL_LEAF; j >= end_type; j--) {
		max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);

		for (i = 0; i < max_id; i++) {
			if (!sla_type_arr[i])
				continue;

			clear_sla(rl_data, sla_type_arr[i]);
		}
	}

	mutex_unlock(&rl_data->rl_lock);
}

int adf_rl_init(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data;
	struct adf_rl *rl;
	int ret = 0;

	/* Validate device parameters */
	if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) {
		ret = -EOPNOTSUPP;
		goto err_ret;
	}

	rl = kzalloc(sizeof(*rl), GFP_KERNEL);
	if (!rl) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&rl->rl_lock);
	rl->device_data = &accel_dev->hw_device->rl_data;
	rl->accel_dev = accel_dev;
	accel_dev->rate_limiting = rl;

err_ret:
	return ret;
}

int adf_rl_start(struct adf_accel_dev *accel_dev)
{
	struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data;
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
	int ret;

	if (!accel_dev->rate_limiting) {
		ret = -EOPNOTSUPP;
		goto ret_err;
	}

	if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
		dev_info(&GET_DEV(accel_dev), "not supported\n");
		ret = -EOPNOTSUPP;
		goto ret_free;
	}

	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset,
		   RL_TOKEN_GRANULARITY_PCIEIN_BUCKET);
	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset,
		   RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET);

	ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "initialization failed\n");
		goto ret_free;
	}

	ret = initialize_default_nodes(accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"failed to initialize default SLAs\n");
		goto ret_sla_rm;
	}

	ret = adf_sysfs_rl_add(accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n");
		goto ret_sysfs_rm;
	}

	return 0;

ret_sysfs_rm:
	adf_sysfs_rl_rm(accel_dev);
ret_sla_rm:
	adf_rl_remove_sla_all(accel_dev, true);
ret_free:
	kfree(accel_dev->rate_limiting);
	accel_dev->rate_limiting = NULL;
ret_err:
	return ret;
}
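
/*
 * Expected lifecycle, sketched for reference: adf_rl_init() validates the
 * device parameters and allocates the context, adf_rl_start() programs the
 * token buckets and creates the default SLAs, adf_rl_stop() removes sysfs
 * entries and all SLAs on shutdown, and adf_rl_exit() frees the context.
 */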

void adf_rl_stop(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->rate_limiting)
		return;

	adf_sysfs_rl_rm(accel_dev);
	adf_rl_remove_sla_all(accel_dev, true);
}

void adf_rl_exit(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->rate_limiting)
		return;

	kfree(accel_dev->rate_limiting);
	accel_dev->rate_limiting = NULL;
}