Linux v6.13.7: drivers/net/ethernet/ti/am65-cpsw-qos.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Texas Instruments K3 AM65 Ethernet QoS submodule
   3 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
   4 *
   5 * quality of service module includes:
   6 * Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
   7 * Interspersed Express Traffic (IET - P802.3br/D2.0)
   8 */
   9
  10#include <linux/pm_runtime.h>
  11#include <linux/math.h>
  12#include <linux/math64.h>
  13#include <linux/time.h>
  14#include <linux/units.h>
  15#include <net/pkt_cls.h>
  16
  17#include "am65-cpsw-nuss.h"
  18#include "am65-cpsw-qos.h"
  19#include "am65-cpts.h"
  20#include "cpsw_ale.h"
  21
  22#define TO_MBPS(x)	DIV_ROUND_UP((x), BYTES_PER_MBIT)
  23
  24enum timer_act {
  25	TACT_PROG,		/* need program timer */
  26	TACT_NEED_STOP,		/* need stop first */
  27	TACT_SKIP_PROG,		/* just buffer can be updated */
  28};
  29
  30static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);
  31
  32static u32
  33am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
  34{
  35	u32 ir;
  36
  37	bus_freq /= 1000000;
  38	ir = DIV_ROUND_UP(((u64)rate_mbps * 32768),  bus_freq);
  39	return ir;
  40}
  41
  42static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
  43{
  44	int prio;
  45
  46	for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
  47		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
  48		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
  49	}
  50}
  51
  52static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
  53{
  54	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
  55	struct am65_cpsw_common *common = port->common;
  56	struct tc_mqprio_qopt_offload *mqprio;
  57	bool enable, shaper_susp = false;
  58	u32 rate_mbps;
  59	int tc, prio;
  60
  61	mqprio = &p_mqprio->mqprio_hw;
  62	/* takes care of no link case as well */
  63	if (p_mqprio->max_rate_total > port->qos.link_speed)
  64		shaper_susp = true;
  65
  66	am65_cpsw_tx_pn_shaper_reset(port);
  67
  68	enable = p_mqprio->shaper_en && !shaper_susp;
  69	if (!enable)
  70		return;
  71
  72	/* Rate limit is specified per Traffic Class but
  73	 * for CPSW, rate limit can be applied per priority
  74	 * at port FIFO.
  75	 *
  76	 * We have assigned the same priority (TCn) to all queues
  77	 * of a Traffic Class so they share the same shaper
  78	 * bandwidth.
  79	 */
  80	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
  81		prio = tc;
  82
  83		rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
  84		rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
  85						       common->bus_freq);
  86		writel(rate_mbps,
  87		       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
  88
  89		rate_mbps = 0;
  90
  91		if (mqprio->max_rate[tc]) {
  92			rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
  93			rate_mbps = TO_MBPS(rate_mbps);
  94			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
  95							       common->bus_freq);
  96		}
  97
  98		writel(rate_mbps,
  99		       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
 100	}
 101}
 102
 103static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
 104					  struct tc_mqprio_qopt_offload *mqprio)
 105{
 106	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
 107	struct netlink_ext_ack *extack = mqprio->extack;
 108	u64 min_rate_total = 0, max_rate_total = 0;
 109	u32 min_rate_msk = 0, max_rate_msk = 0;
 110	bool has_min_rate, has_max_rate;
 111	int num_tc, i;
 112
 113	if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
 114		return 0;
 115
 116	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
 117		return 0;
 118
 119	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
 120	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
 121
 122	if (!has_min_rate && has_max_rate) {
 123		NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
 124		return -EOPNOTSUPP;
 125	}
 126
 127	if (!has_min_rate)
 128		return 0;
 129
 130	num_tc = mqprio->qopt.num_tc;
 131
 132	for (i = num_tc - 1; i >= 0; i--) {
 133		u32 ch_msk;
 134
 135		if (mqprio->min_rate[i])
 136			min_rate_msk |= BIT(i);
 137		min_rate_total +=  mqprio->min_rate[i];
 138
 139		if (has_max_rate) {
 140			if (mqprio->max_rate[i])
 141				max_rate_msk |= BIT(i);
 142			max_rate_total +=  mqprio->max_rate[i];
 143
 144			if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
 145				NL_SET_ERR_MSG_FMT_MOD(extack,
 146						       "TX tc%d rate max>0 but min=0",
 147						       i);
 148				return -EINVAL;
 149			}
 150
 151			if (mqprio->max_rate[i] &&
 152			    mqprio->max_rate[i] < mqprio->min_rate[i]) {
 153				NL_SET_ERR_MSG_FMT_MOD(extack,
 154						       "TX tc%d rate min(%llu)>max(%llu)",
 155						       i, mqprio->min_rate[i],
 156						       mqprio->max_rate[i]);
 157				return -EINVAL;
 158			}
 159		}
 160
 161		ch_msk = GENMASK(num_tc - 1, i);
 162		if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
 163			NL_SET_ERR_MSG_FMT_MOD(extack,
 164					       "Min rate must be set sequentially hi->lo tx_rate_msk%x",
 165					       min_rate_msk);
 166			return -EINVAL;
 167		}
 168
 169		if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
 170			NL_SET_ERR_MSG_FMT_MOD(extack,
 171					       "Max rate must be set sequentially hi->lo tx_rate_msk%x",
 172					       max_rate_msk);
 173			return -EINVAL;
 174		}
 175	}
 176
 177	min_rate_total = TO_MBPS(min_rate_total);
 178	max_rate_total = TO_MBPS(max_rate_total);
 179
 180	p_mqprio->shaper_en = true;
 181	p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);
 182
 183	return 0;
 184}
 185
 186static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
 187{
 188	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 189	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
 190
 191	p_mqprio->shaper_en = false;
 192	p_mqprio->max_rate_total = 0;
 193
 194	am65_cpsw_tx_pn_shaper_reset(port);
 195	netdev_reset_tc(ndev);
 196
 197	/* Reset all Queue priorities to 0 */
 198	writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
 199
 200	am65_cpsw_iet_change_preemptible_tcs(port, 0);
 201}
 202
 203static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
 204{
 205	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 206	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
 207	struct tc_mqprio_qopt_offload *mqprio = type_data;
 208	struct am65_cpsw_common *common = port->common;
 209	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
 210	int i, tc, offset, count, prio, ret;
 211	u8 num_tc = qopt->num_tc;
 212	u32 tx_prio_map = 0;
 213
 214	memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
 215
 216	ret = pm_runtime_get_sync(common->dev);
 217	if (ret < 0) {
 218		pm_runtime_put_noidle(common->dev);
 219		return ret;
 220	}
 221
 222	if (!num_tc) {
 223		am65_cpsw_reset_tc_mqprio(ndev);
 224		ret = 0;
 225		goto exit_put;
 226	}
 227
 228	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
 229	if (ret)
 230		goto exit_put;
 231
 232	netdev_set_num_tc(ndev, num_tc);
 233
 234	/* Multiple Linux priorities can map to a Traffic Class
 235	 * A Traffic Class can have multiple contiguous Queues,
 236	 * Queues get mapped to Channels (thread_id),
 237	 *	if not VLAN tagged, thread_id is used as packet_priority
  238	 *	if VLAN tagged, VLAN priority is used as packet_priority
 239	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
 240	 * header_priority gets mapped to switch_priority in pn_tx_pri_map.
 241	 * As p0_rx_pri_map is left at defaults (0x76543210), we can
 242	 * assume that Queue_n gets mapped to header_priority_n. We can then
 243	 * set the switch priority in pn_tx_pri_map.
 244	 */
 245
 246	for (tc = 0; tc < num_tc; tc++) {
 247		prio = tc;
 248
 249		/* For simplicity we assign the same priority (TCn) to
 250		 * all queues of a Traffic Class.
 251		 */
 252		for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
 253			tx_prio_map |= prio << (4 * i);
 254
 255		count = qopt->count[tc];
 256		offset = qopt->offset[tc];
 257		netdev_set_tc_queue(ndev, tc, count, offset);
 258	}
 259
 260	writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
 261
 262	am65_cpsw_tx_pn_shaper_apply(port);
 263	am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);
 264
 265exit_put:
 266	pm_runtime_put(common->dev);
 267
 268	return ret;
 269}
 270
 271static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
 272{
 273	int verify_time_ms = port->qos.iet.verify_time_ms;
 274	u32 val;
 275
 276	/* The number of wireside clocks contained in the verify
 277	 * timeout counter. The default is 0x1312d0
  278	 * (10ms at 125MHz in 1G mode).
 279	 */
 280	val = 125 * HZ_PER_MHZ;	/* assuming 125MHz wireside clock */
 281
 282	val /= MILLIHZ_PER_HZ;		/* count per ms timeout */
 283	val *= verify_time_ms;		/* count for timeout ms */
 284
 285	if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
 286		return -EINVAL;
 287
 288	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);
 289
 290	return 0;
 291}
 292
 293static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
 294{
 295	u32 ctrl, status;
 296	int try;
 297
 298	try = 20;
 299	do {
 300		/* Reset the verify state machine by writing 1
 301		 * to LINKFAIL
 302		 */
 303		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 304		ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
 305		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 306
 307		/* Clear MAC_LINKFAIL bit to start Verify. */
 308		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 309		ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
 310		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 311
 312		msleep(port->qos.iet.verify_time_ms);
 313
 314		status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
 315		if (status & AM65_CPSW_PN_MAC_VERIFIED)
 316			return 0;
 317
 318		if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
 319			netdev_dbg(port->ndev,
 320				   "MAC Merge verify failed, trying again\n");
 321			continue;
 322		}
 323
 324		if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
 325			netdev_dbg(port->ndev, "MAC Merge respond error\n");
 326			return -ENODEV;
 327		}
 328
 329		if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
 330			netdev_dbg(port->ndev, "MAC Merge verify error\n");
 331			return -ENODEV;
 332		}
 333	} while (try-- > 0);
 334
 335	netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
 336	return -ETIMEDOUT;
 337}
 338
 339static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
 340{
 341	u32 val;
 342
 343	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 344	val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
 345	val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
 346	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 347}
 348
 349/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 350 * UAPI doesn't allow tx enable without rx enable.
 351 */
 352void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
 353{
 354	struct am65_cpsw_port *port;
 355	bool rx_enable = false;
 356	u32 val;
 357	int i;
 358
 359	for (i = 0; i < common->port_num; i++) {
 360		port = &common->ports[i];
 361		val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
 362		rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
 363		if (rx_enable)
 364			break;
 365	}
 366
 367	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
 368
 369	if (rx_enable)
 370		val |= AM65_CPSW_CTL_IET_EN;
 371	else
 372		val &= ~AM65_CPSW_CTL_IET_EN;
 373
 374	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
 375	common->iet_enabled = rx_enable;
 376}
 377
 378/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
 379 * (active/inactive), but the preemptible traffic classes should only be
 380 * committed to hardware once TX is active. Resort to polling.
 381 */
 382void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
 383{
 384	u8 preemptible_tcs;
 385	int err;
 386	u32 val;
 387
 388	if (port->qos.link_speed == SPEED_UNKNOWN)
 389		return;
 390
 391	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
 392	if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
 393		return;
 394
 395	/* update common IET enable */
 396	am65_cpsw_iet_common_enable(port->common);
 397
 398	/* update verify count */
 399	err = am65_cpsw_iet_set_verify_timeout_count(port);
 400	if (err) {
 401		netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
 402		return;
 403	}
 404
 405	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 406	if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
 407		err = am65_cpsw_iet_verify_wait(port);
 408		if (err)
 409			return;
 410	}
 411
 412	preemptible_tcs = port->qos.iet.preemptible_tcs;
 413	am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
 414}
 415
 416static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
 417{
 418	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
 419
 420	port->qos.iet.preemptible_tcs = preemptible_tcs;
 421	mutex_lock(&priv->mm_lock);
 422	am65_cpsw_iet_commit_preemptible_tcs(port);
 423	mutex_unlock(&priv->mm_lock);
 424}
 425
 426static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
 427{
 428	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
 429	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 430
 431	mutex_lock(&priv->mm_lock);
 432	am65_cpsw_iet_commit_preemptible_tcs(port);
 433	mutex_unlock(&priv->mm_lock);
 434}
 435
 436static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
 437{
 438	return port->qos.est_oper || port->qos.est_admin;
 439}
 440
 441static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
 442{
 443	u32 val;
 444
 445	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
 446
 447	if (enable)
 448		val |= AM65_CPSW_CTL_EST_EN;
 449	else
 450		val &= ~AM65_CPSW_CTL_EST_EN;
 451
 452	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
 453	common->est_enabled = enable;
 454}
 455
 456static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
 457{
 458	u32 val;
 459
 460	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
 461	if (enable)
 462		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
 463	else
 464		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
 465
 466	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
 467}
 468
 469/* target new EST RAM buffer, actual toggle happens after cycle completion */
 470static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
 471					      int buf_num)
 472{
 473	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 474	u32 val;
 475
 476	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 477	if (buf_num)
 478		val |= AM65_CPSW_PN_EST_BUFSEL;
 479	else
 480		val &= ~AM65_CPSW_PN_EST_BUFSEL;
 481
 482	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 483}
 484
 485/* am65_cpsw_port_est_is_swapped() - Indicate if h/w is transitioned
 486 * admin -> oper or not
 487 *
  488 * Return true if already transitioned, i.e. oper is equal to admin and the buf
  489 * numbers match (est_oper->buf matches est_admin->buf).
  490 * Return false if before the transition, i.e. oper is not equal to admin (a
  491 * previous admin command is still waiting to be transitioned to oper state
  492 * and est_oper->buf does not match est_admin->buf).
 493 */
 494static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
 495					 int *admin)
 496{
 497	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 498	u32 val;
 499
 500	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
 501	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
 502
 503	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 504	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
 505
 506	return *admin == *oper;
 507}
 508
 509/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
 510 * Admin to program the new schedule.
 511 *
  512 * The logic is as follows:
  513 * If oper is the same as admin, return the other buffer (!oper) as the admin
  514 * buffer.  If they differ, let the current oper continue, since hardware is
  515 * still transitioning from admin -> oper, by selecting the same oper buffer
  516 * via the EST_BUFSEL bit in the EST CTL register. On the second iteration
  517 * they will match and the code returns.
  518 * The actual buffer to write commands to is selected later, just before the
  519 * schedule is updated.
 520 */
 521static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
 522{
 523	int oper, admin;
 524	int roll = 2;
 525
 526	while (roll--) {
 527		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
 528			return !oper;
 529
 530		/* admin is not set, so hinder transition as it's not allowed
 531		 * to touch memory in-flight, by targeting same oper buf.
 532		 */
 533		am65_cpsw_port_est_assign_buf_num(ndev, oper);
 534
 535		dev_info(&ndev->dev,
 536			 "Prev. EST admin cycle is in transit %d -> %d\n",
 537			 oper, admin);
 538	}
 539
 540	return admin;
 541}
 542
 543static void am65_cpsw_admin_to_oper(struct net_device *ndev)
 544{
 545	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 546
 547	devm_kfree(&ndev->dev, port->qos.est_oper);
 548
 549	port->qos.est_oper = port->qos.est_admin;
 550	port->qos.est_admin = NULL;
 551}
 552
 553static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
 554					   struct am65_cpsw_est *est_new)
 555{
 556	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 557	u32 val;
 558
 559	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 560	val &= ~AM65_CPSW_PN_EST_ONEBUF;
 561	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 562
 563	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
 564
 565	/* rolled buf num means changed buf while configuring */
 566	if (port->qos.est_oper && port->qos.est_admin &&
 567	    est_new->buf == port->qos.est_oper->buf)
 568		am65_cpsw_admin_to_oper(ndev);
 569}
 570
 571static void am65_cpsw_est_set(struct net_device *ndev, int enable)
 572{
 573	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 574	struct am65_cpsw_common *common = port->common;
 575	int common_enable = 0;
 576	int i;
 577
 578	am65_cpsw_port_est_enable(port, enable);
 579
 580	for (i = 0; i < common->port_num; i++)
 581		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
 582
 583	common_enable |= enable;
 584	am65_cpsw_est_enable(common, common_enable);
 585}
 586
  587/* This update should be called from any routine before reading the real state
  588 * of the admin -> oper transition, in particular from any generic routine that
  589 * reports the real state to the Taprio Qdisc.
 590 */
 591static void am65_cpsw_est_update_state(struct net_device *ndev)
 592{
 593	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 594	int oper, admin;
 595
 596	if (!port->qos.est_admin)
 597		return;
 598
 599	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
 600		return;
 601
 602	am65_cpsw_admin_to_oper(ndev);
 603}
 604
  605/* The fetch command count is a number of bytes in Gigabit mode, or of nibbles
  606 * in 10/100Mb mode. So, given the link speed and a time in ns, convert ns to
  607 * the number of bytes/nibbles that can be sent during transmission at that speed.
 608 */
 609static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
 610{
 611	u64 temp;
 612
 613	temp = ns * link_speed;
 614	if (link_speed < SPEED_1000)
 615		temp <<= 1;
 616
 617	return DIV_ROUND_UP(temp, 8 * 1000);
 618}
 619
 620static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
 621						  int fetch_cnt,
 622						  int fetch_allow)
 623{
 624	u32 prio_mask, cmd_fetch_cnt, cmd;
 625
 626	do {
 627		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
 628			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
 629			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
 630		} else {
 631			cmd_fetch_cnt = fetch_cnt;
 632			/* fetch count can't be less than 16? */
 633			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
 634				cmd_fetch_cnt = 16;
 635
 636			fetch_cnt = 0;
 637		}
 638
 639		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
 640		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
 641
 642		writel(cmd, addr);
 643		addr += 4;
 644	} while (fetch_cnt);
 645
 646	return addr;
 647}
 648
 649static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
 650				      struct tc_taprio_qopt_offload *taprio,
 651				      int link_speed)
 652{
 653	int i, cmd_cnt, cmd_sum = 0;
 654	u32 fetch_cnt;
 655
 656	for (i = 0; i < taprio->num_entries; i++) {
 657		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
 658			dev_err(&ndev->dev, "Only SET command is supported");
 659			return -EINVAL;
 660		}
 661
 662		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
 663						   link_speed);
 664
 665		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
 666		if (!cmd_cnt)
 667			cmd_cnt++;
 668
 669		cmd_sum += cmd_cnt;
 670
 671		if (!fetch_cnt)
 672			break;
 673	}
 674
 675	return cmd_sum;
 676}
 677
 678static int am65_cpsw_est_check_scheds(struct net_device *ndev,
 679				      struct am65_cpsw_est *est_new)
 680{
 681	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 682	int cmd_num;
 683
 684	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
 685					     port->qos.link_speed);
 686	if (cmd_num < 0)
 687		return cmd_num;
 688
 689	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
 690		dev_err(&ndev->dev, "No fetch RAM");
 691		return -ENOMEM;
 692	}
 693
 694	return 0;
 695}
 696
 697static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
 698					 struct am65_cpsw_est *est_new)
 699{
 700	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 701	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
 702	void __iomem *ram_addr, *max_ram_addr;
 703	struct tc_taprio_sched_entry *entry;
 704	int i, ram_size;
 705
 706	ram_addr = port->fetch_ram_base;
 707	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
 708	ram_addr += est_new->buf * ram_size;
 709
 710	max_ram_addr = ram_size + ram_addr;
 711	for (i = 0; i < est_new->taprio.num_entries; i++) {
 712		entry = &est_new->taprio.entries[i];
 713
 714		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
 715						   port->qos.link_speed);
 716		fetch_allow = entry->gate_mask;
 717		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
 718			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
 719				fetch_allow);
 720
 721		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
 722							fetch_allow);
 723
 724		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
 725			dev_info(&ndev->dev,
 726				 "next scheds after %d have no impact", i + 1);
 727			break;
 728		}
 729
 730		all_fetch_allow |= fetch_allow;
 731	}
 732
 733	/* end cmd, enabling non-timed queues for potential over cycle time */
 734	if (ram_addr < max_ram_addr)
 735		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
 736}
 737
 738/*
 739 * Enable ESTf periodic output, set cycle start time and interval.
 740 */
 741static int am65_cpsw_timer_set(struct net_device *ndev,
 742			       struct am65_cpsw_est *est_new)
 743{
 744	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 745	struct am65_cpsw_common *common = port->common;
 746	struct am65_cpts *cpts = common->cpts;
 747	struct am65_cpts_estf_cfg cfg;
 748
 749	cfg.ns_period = est_new->taprio.cycle_time;
 750	cfg.ns_start = est_new->taprio.base_time;
 751
 752	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
 753}
 754
 755static void am65_cpsw_timer_stop(struct net_device *ndev)
 756{
 757	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 758	struct am65_cpts *cpts = port->common->cpts;
 759
 760	am65_cpts_estf_disable(cpts, port->port_id - 1);
 761}
 762
 763static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
 764					  struct am65_cpsw_est *est_new)
 765{
 766	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
 767	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 768	struct am65_cpts *cpts = port->common->cpts;
 769	u64 cur_time;
 770	s64 diff;
 771
 772	if (!port->qos.est_oper)
 773		return TACT_PROG;
 774
 775	taprio_new = &est_new->taprio;
 776	taprio_oper = &port->qos.est_oper->taprio;
 777
 778	if (taprio_new->cycle_time != taprio_oper->cycle_time)
 779		return TACT_NEED_STOP;
 780
  781	/* in order to avoid timer reset, get base_time from oper taprio */
 782	if (!taprio_new->base_time && taprio_oper)
 783		taprio_new->base_time = taprio_oper->base_time;
 784
 785	if (taprio_new->base_time == taprio_oper->base_time)
 786		return TACT_SKIP_PROG;
 787
 788	/* base times are cycle synchronized */
 789	diff = taprio_new->base_time - taprio_oper->base_time;
 790	diff = diff < 0 ? -diff : diff;
 791	if (diff % taprio_new->cycle_time)
 792		return TACT_NEED_STOP;
 793
 794	cur_time = am65_cpts_ns_gettime(cpts);
 795	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
 796		return TACT_SKIP_PROG;
 797
 798	/* TODO: Admin schedule at future time is not currently supported */
 799	return TACT_NEED_STOP;
 800}
 801
 802static void am65_cpsw_stop_est(struct net_device *ndev)
 803{
 804	am65_cpsw_est_set(ndev, 0);
 805	am65_cpsw_timer_stop(ndev);
 806}
 807
 808static void am65_cpsw_taprio_destroy(struct net_device *ndev)
 809{
 810	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 811
 812	am65_cpsw_stop_est(ndev);
 813
 814	devm_kfree(&ndev->dev, port->qos.est_admin);
 815	devm_kfree(&ndev->dev, port->qos.est_oper);
 816
 817	port->qos.est_oper = NULL;
 818	port->qos.est_admin = NULL;
 819
 820	am65_cpsw_reset_tc_mqprio(ndev);
 821}
 822
 823static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
 824				struct tc_taprio_qopt_offload *to)
 825{
 826	int i;
 827
 828	*to = *from;
 829	for (i = 0; i < from->num_entries; i++)
 830		to->entries[i] = from->entries[i];
 831}
 832
 833static int am65_cpsw_taprio_replace(struct net_device *ndev,
 834				    struct tc_taprio_qopt_offload *taprio)
 835{
 836	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 837	struct netlink_ext_ack *extack = taprio->mqprio.extack;
 838	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 839	struct am65_cpts *cpts = common->cpts;
 840	struct am65_cpsw_est *est_new;
 841	u64 cur_time, n;
 842	int ret, tact;
 843
 844	if (!netif_running(ndev)) {
 845		NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
 846		return -ENETDOWN;
 847	}
 848
 849	if (common->pf_p0_rx_ptype_rrobin) {
 850		NL_SET_ERR_MSG_MOD(extack,
 851				   "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
 852		return -EINVAL;
 853	}
 854
 855	if (port->qos.link_speed == SPEED_UNKNOWN)
 856		return -ENOLINK;
 857
 858	if (taprio->cycle_time_extension) {
 859		NL_SET_ERR_MSG_MOD(extack,
 860				   "cycle time extension not supported");
 861		return -EOPNOTSUPP;
 862	}
 863
 864	est_new = devm_kzalloc(&ndev->dev,
 865			       struct_size(est_new, taprio.entries, taprio->num_entries),
 866			       GFP_KERNEL);
 867	if (!est_new)
 868		return -ENOMEM;
 869
 870	ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
 871	if (ret)
 872		return ret;
 873
 874	am65_cpsw_cp_taprio(taprio, &est_new->taprio);
 875
 876	am65_cpsw_est_update_state(ndev);
 877
 878	ret = am65_cpsw_est_check_scheds(ndev, est_new);
 879	if (ret < 0)
 880		goto fail;
 881
 882	tact = am65_cpsw_timer_act(ndev, est_new);
 883	if (tact == TACT_NEED_STOP) {
 884		NL_SET_ERR_MSG_MOD(extack,
 885				   "Can't toggle estf timer, stop taprio first");
 886		ret = -EINVAL;
 887		goto fail;
 888	}
 889
 890	if (tact == TACT_PROG)
 891		am65_cpsw_timer_stop(ndev);
 892
 893	am65_cpsw_port_est_get_buf_num(ndev, est_new);
 894	am65_cpsw_est_set_sched_list(ndev, est_new);
 895	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
 896
 897	/* If the base-time is in the past, start schedule from the time:
 898	 * base_time + (N*cycle_time)
 899	 * where N is the smallest possible integer such that the above
 900	 * time is in the future.
 901	 */
 902	cur_time = am65_cpts_ns_gettime(cpts);
 903	if (est_new->taprio.base_time < cur_time) {
 904		n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
 905		est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
 906	}
 907
 908	am65_cpsw_est_set(ndev, 1);
 909
 910	if (tact == TACT_PROG) {
 911		ret = am65_cpsw_timer_set(ndev, est_new);
 912		if (ret) {
 913			NL_SET_ERR_MSG_MOD(extack,
 914					   "Failed to set cycle time");
 915			goto fail;
 916		}
 917	}
 918
 919	devm_kfree(&ndev->dev, port->qos.est_admin);
 920	port->qos.est_admin = est_new;
 921	am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);
 922
 923	return 0;
 924
 925fail:
 926	am65_cpsw_reset_tc_mqprio(ndev);
 927	devm_kfree(&ndev->dev, est_new);
 928	return ret;
 929}
 930
 931static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
 932{
 933	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 934	ktime_t cur_time;
 935	s64 delta;
 936
 937	if (!am65_cpsw_port_est_enabled(port))
 938		return;
 939
 940	if (port->qos.link_down_time) {
 941		cur_time = ktime_get();
 942		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
 943		if (delta > USEC_PER_SEC) {
 944			dev_err(&ndev->dev,
 945				"Link has been lost too long, stopping TAS");
 946			goto purge_est;
 947		}
 948	}
 949
 950	return;
 951
 952purge_est:
 953	am65_cpsw_taprio_destroy(ndev);
 954}
 955
 956static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
 957{
 958	struct tc_taprio_qopt_offload *taprio = type_data;
 959	int err = 0;
 960
 961	switch (taprio->cmd) {
 962	case TAPRIO_CMD_REPLACE:
 963		err = am65_cpsw_taprio_replace(ndev, taprio);
 964		break;
 965	case TAPRIO_CMD_DESTROY:
 966		am65_cpsw_taprio_destroy(ndev);
 967		break;
 968	default:
 969		err = -EOPNOTSUPP;
 970	}
 971
 972	return err;
 973}
 974
 975static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
 976{
 977	struct tc_query_caps_base *base = type_data;
 978
 979	switch (base->type) {
 980	case TC_SETUP_QDISC_MQPRIO: {
 981		struct tc_mqprio_caps *caps = base->caps;
 982
 983		caps->validate_queue_counts = true;
 984
 985		return 0;
 986	}
 987
 988	case TC_SETUP_QDISC_TAPRIO: {
 989		struct tc_taprio_caps *caps = base->caps;
 990
 991		caps->gate_mask_per_txq = true;
 992
 993		return 0;
 994	}
 995	default:
 996		return -EOPNOTSUPP;
 997	}
 998}
 999
1000static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
1001					       struct netlink_ext_ack *extack,
1002					       struct flow_cls_offload *cls,
1003					       u64 rate_pkt_ps)
1004{
1005	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
1006	struct flow_dissector *dissector = rule->match.dissector;
1007	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
1008	struct am65_cpsw_qos *qos = &port->qos;
1009	struct flow_match_eth_addrs match;
1010	int ret;
1011
1012	if (dissector->used_keys &
1013	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
1014	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
1015	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
1016		NL_SET_ERR_MSG_MOD(extack,
1017				   "Unsupported keys used");
1018		return -EOPNOTSUPP;
1019	}
1020
1021	if (flow_rule_match_has_control_flags(rule, extack))
1022		return -EOPNOTSUPP;
1023
1024	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1025		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
1026		return -EOPNOTSUPP;
1027	}
1028
1029	flow_rule_match_eth_addrs(rule, &match);
1030
1031	if (!is_zero_ether_addr(match.mask->src)) {
1032		NL_SET_ERR_MSG_MOD(extack,
1033				   "Matching on source MAC not supported");
1034		return -EOPNOTSUPP;
1035	}
1036
1037	if (is_broadcast_ether_addr(match.key->dst) &&
1038	    is_broadcast_ether_addr(match.mask->dst)) {
1039		ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
1040		if (ret)
1041			return ret;
1042
1043		qos->ale_bc_ratelimit.cookie = cls->cookie;
1044		qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
1045	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
1046		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
1047		ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
1048		if (ret)
1049			return ret;
1050
1051		qos->ale_mc_ratelimit.cookie = cls->cookie;
1052		qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
1053	} else {
1054		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
1055		return -EOPNOTSUPP;
1056	}
1057
1058	return 0;
1059}
1060
1061static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
1062						    const struct flow_action_entry *act,
1063						    struct netlink_ext_ack *extack)
1064{
1065	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
1066		NL_SET_ERR_MSG_MOD(extack,
1067				   "Offload not supported when exceed action is not drop");
1068		return -EOPNOTSUPP;
1069	}
1070
1071	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
1072	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
1073		NL_SET_ERR_MSG_MOD(extack,
1074				   "Offload not supported when conform action is not pipe or ok");
1075		return -EOPNOTSUPP;
1076	}
1077
1078	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
1079	    !flow_action_is_last_entry(action, act)) {
1080		NL_SET_ERR_MSG_MOD(extack,
1081				   "Offload not supported when conform action is ok, but action is not last");
1082		return -EOPNOTSUPP;
1083	}
1084
1085	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
1086	    act->police.avrate || act->police.overhead) {
1087		NL_SET_ERR_MSG_MOD(extack,
1088				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
1089		return -EOPNOTSUPP;
1090	}
1091
1092	return 0;
1093}
1094
1095static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
1096					     struct flow_cls_offload *cls)
1097{
1098	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
1099	struct netlink_ext_ack *extack = cls->common.extack;
1100	const struct flow_action_entry *act;
1101	int i, ret;
1102
1103	flow_action_for_each(i, act, &rule->action) {
1104		switch (act->id) {
1105		case FLOW_ACTION_POLICE:
1106			ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
1107			if (ret)
1108				return ret;
1109
1110			return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
1111								   act->police.rate_pkt_ps);
1112		default:
1113			NL_SET_ERR_MSG_MOD(extack,
1114					   "Action not supported");
1115			return -EOPNOTSUPP;
1116		}
1117	}
1118	return -EOPNOTSUPP;
1119}
1120
1121static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
1122{
1123	struct am65_cpsw_qos *qos = &port->qos;
1124
1125	if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
1126		qos->ale_bc_ratelimit.cookie = 0;
1127		qos->ale_bc_ratelimit.rate_packet_ps = 0;
1128		cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
1129	}
1130
1131	if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
1132		qos->ale_mc_ratelimit.cookie = 0;
1133		qos->ale_mc_ratelimit.rate_packet_ps = 0;
1134		cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
1135	}
1136
1137	return 0;
1138}
1139
1140static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
1141					    struct flow_cls_offload *cls_flower)
1142{
1143	switch (cls_flower->command) {
1144	case FLOW_CLS_REPLACE:
1145		return am65_cpsw_qos_configure_clsflower(port, cls_flower);
1146	case FLOW_CLS_DESTROY:
1147		return am65_cpsw_qos_delete_clsflower(port, cls_flower);
1148	default:
1149		return -EOPNOTSUPP;
1150	}
1151}
1152
1153static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1154{
1155	struct am65_cpsw_port *port = cb_priv;
1156
1157	if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
1158		return -EOPNOTSUPP;
1159
1160	switch (type) {
1161	case TC_SETUP_CLSFLOWER:
1162		return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
1163	default:
1164		return -EOPNOTSUPP;
1165	}
1166}
1167
1168static LIST_HEAD(am65_cpsw_qos_block_cb_list);
1169
1170static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
1171{
1172	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1173
1174	return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
1175					  am65_cpsw_qos_setup_tc_block_cb,
1176					  port, port, true);
1177}
1178
1179static void
1180am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
1181			       int tx_ch, u32 rate_mbps)
1182{
1183	struct am65_cpsw_host *host = am65_common_get_host(common);
1184	u32 ch_cir;
1185	int i;
1186
1187	ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
1188	writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
1189
1190	/* update rates for every port tx queues */
1191	for (i = 0; i < common->port_num; i++) {
1192		struct net_device *ndev = common->ports[i].ndev;
1193
1194		if (!ndev)
1195			continue;
1196		netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
1197	}
1198}
1199
1200int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
1201					int queue, u32 rate_mbps)
1202{
1203	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1204	struct am65_cpsw_common *common = port->common;
1205	struct am65_cpsw_tx_chn *tx_chn;
1206	u32 ch_rate, tx_ch_rate_msk_new;
1207	u32 ch_msk = 0;
1208	int ret;
1209
1210	dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
1211		queue, rate_mbps, common->tx_ch_rate_msk);
1212
1213	if (common->pf_p0_rx_ptype_rrobin) {
1214		dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
1215		return -EINVAL;
1216	}
1217
1218	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
1219	if (ch_rate == rate_mbps)
1220		return 0;
1221
1222	ret = pm_runtime_get_sync(common->dev);
1223	if (ret < 0) {
1224		pm_runtime_put_noidle(common->dev);
1225		return ret;
1226	}
1227	ret = 0;
1228
1229	tx_ch_rate_msk_new = common->tx_ch_rate_msk;
1230	if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
1231		tx_ch_rate_msk_new |= BIT(queue);
1232		ch_msk = GENMASK(common->tx_ch_num - 1, queue);
1233		ch_msk = tx_ch_rate_msk_new ^ ch_msk;
1234	} else if (!rate_mbps) {
1235		tx_ch_rate_msk_new &= ~BIT(queue);
1236		ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
1237		ch_msk = tx_ch_rate_msk_new & ch_msk;
1238	}
1239
1240	if (ch_msk) {
1241		dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
1242			common->tx_ch_rate_msk, tx_ch_rate_msk_new);
1243		ret = -EINVAL;
1244		goto exit_put;
1245	}
1246
1247	tx_chn = &common->tx_chns[queue];
1248	tx_chn->rate_mbps = rate_mbps;
1249	common->tx_ch_rate_msk = tx_ch_rate_msk_new;
1250
1251	if (!common->usage_count)
1252		/* will be applied on next netif up */
1253		goto exit_put;
1254
1255	am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);
1256
1257exit_put:
1258	pm_runtime_put(common->dev);
1259	return ret;
1260}
1261
1262void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
1263{
1264	struct am65_cpsw_host *host = am65_common_get_host(common);
1265	int tx_ch;
1266
1267	for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
1268		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
1269		u32 ch_cir;
1270
1271		if (!tx_chn->rate_mbps)
1272			continue;
1273
1274		ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
1275						    common->bus_freq);
1276		writel(ch_cir,
1277		       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
1278	}
1279}
1280
1281int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1282			       void *type_data)
1283{
1284	switch (type) {
1285	case TC_QUERY_CAPS:
1286		return am65_cpsw_tc_query_caps(ndev, type_data);
1287	case TC_SETUP_QDISC_TAPRIO:
1288		return am65_cpsw_setup_taprio(ndev, type_data);
1289	case TC_SETUP_QDISC_MQPRIO:
1290		return am65_cpsw_setup_mqprio(ndev, type_data);
1291	case TC_SETUP_BLOCK:
1292		return am65_cpsw_qos_setup_tc_block(ndev, type_data);
1293	default:
1294		return -EOPNOTSUPP;
1295	}
1296}
1297
1298void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
1299{
1300	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1301
1302	port->qos.link_speed = link_speed;
1303	am65_cpsw_tx_pn_shaper_apply(port);
1304	am65_cpsw_iet_link_state_update(ndev);
1305
1306	am65_cpsw_est_link_up(ndev, link_speed);
1307	port->qos.link_down_time = 0;
1308}
1309
1310void am65_cpsw_qos_link_down(struct net_device *ndev)
1311{
1312	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1313
1314	port->qos.link_speed = SPEED_UNKNOWN;
1315	am65_cpsw_tx_pn_shaper_apply(port);
1316	am65_cpsw_iet_link_state_update(ndev);
1317
1318	if (!port->qos.link_down_time)
1319		port->qos.link_down_time = ktime_get();
1320}
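
For reference, the short standalone program below is not part of the kernel sources above; it is a minimal sketch of two pieces of arithmetic the listing relies on: the bytes/s -> Mbit/s -> CIR conversion done by TO_MBPS() and am65_cpsw_qos_tx_rate_calc(), and the base-time roll-forward performed in am65_cpsw_taprio_replace(). The 250 MHz bus clock, the 100 Mbit/s rate and the 1 ms cycle time are assumed example values, not taken from the driver.

/* Standalone userspace sketch; mirrors the driver's arithmetic only. */
#include <stdint.h>
#include <stdio.h>

#define BYTES_PER_MBIT		(1000000ULL / 8)	/* 1 Mbit expressed in bytes */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same formula as am65_cpsw_qos_tx_rate_calc() in the listing above */
static uint32_t tx_rate_calc(uint32_t rate_mbps, unsigned long bus_freq_hz)
{
	unsigned long bus_freq_mhz = bus_freq_hz / 1000000;

	return DIV_ROUND_UP((uint64_t)rate_mbps * 32768, bus_freq_mhz);
}

int main(void)
{
	/* TO_MBPS() treats the rate as bytes/s; 12500000 bytes/s == 100 Mbit/s */
	uint64_t min_rate_bytes_ps = 12500000ULL;
	uint32_t rate_mbps = (uint32_t)DIV_ROUND_UP(min_rate_bytes_ps, BYTES_PER_MBIT);
	uint32_t cir = tx_rate_calc(rate_mbps, 250000000UL);	/* assumed 250 MHz bus clock */

	printf("CIR register value for %u Mbit/s: %u\n",
	       (unsigned int)rate_mbps, (unsigned int)cir);

	/* Base-time roll-forward: if base_time is in the past, advance it by
	 * the smallest multiple of cycle_time that lands in the future.
	 */
	uint64_t cycle_time = 1000000ULL;	/* 1 ms cycle, in ns */
	uint64_t base_time = 0;			/* in the past */
	uint64_t cur_time = 5500000ULL;		/* pretend current PHC time, in ns */

	if (base_time < cur_time) {
		uint64_t n = (cur_time - base_time) / cycle_time;

		base_time += (n + 1) * cycle_time;
	}
	printf("adjusted base_time: %llu ns\n", (unsigned long long)base_time);

	return 0;
}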
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Texas Instruments K3 AM65 Ethernet QoS submodule
   3 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
   4 *
   5 * quality of service module includes:
   6 * Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
   7 * Interspersed Express Traffic (IET - P802.3br/D2.0)
   8 */
   9
  10#include <linux/pm_runtime.h>
  11#include <linux/math.h>
 
  12#include <linux/time.h>
  13#include <linux/units.h>
  14#include <net/pkt_cls.h>
  15
  16#include "am65-cpsw-nuss.h"
  17#include "am65-cpsw-qos.h"
  18#include "am65-cpts.h"
  19#include "cpsw_ale.h"
  20
  21#define TO_MBPS(x)	DIV_ROUND_UP((x), BYTES_PER_MBIT)
  22
  23enum timer_act {
  24	TACT_PROG,		/* need program timer */
  25	TACT_NEED_STOP,		/* need stop first */
  26	TACT_SKIP_PROG,		/* just buffer can be updated */
  27};
  28
  29static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);
  30
  31static u32
  32am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
  33{
  34	u32 ir;
  35
  36	bus_freq /= 1000000;
  37	ir = DIV_ROUND_UP(((u64)rate_mbps * 32768),  bus_freq);
  38	return ir;
  39}
  40
  41static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
  42{
  43	int prio;
  44
  45	for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
  46		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
  47		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
  48	}
  49}
  50
  51static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
  52{
  53	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
  54	struct am65_cpsw_common *common = port->common;
  55	struct tc_mqprio_qopt_offload *mqprio;
  56	bool enable, shaper_susp = false;
  57	u32 rate_mbps;
  58	int tc, prio;
  59
  60	mqprio = &p_mqprio->mqprio_hw;
  61	/* takes care of no link case as well */
  62	if (p_mqprio->max_rate_total > port->qos.link_speed)
  63		shaper_susp = true;
  64
  65	am65_cpsw_tx_pn_shaper_reset(port);
  66
  67	enable = p_mqprio->shaper_en && !shaper_susp;
  68	if (!enable)
  69		return;
  70
  71	/* Rate limit is specified per Traffic Class but
  72	 * for CPSW, rate limit can be applied per priority
  73	 * at port FIFO.
  74	 *
  75	 * We have assigned the same priority (TCn) to all queues
  76	 * of a Traffic Class so they share the same shaper
  77	 * bandwidth.
  78	 */
  79	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
  80		prio = tc;
  81
  82		rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
  83		rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
  84						       common->bus_freq);
  85		writel(rate_mbps,
  86		       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
  87
  88		rate_mbps = 0;
  89
  90		if (mqprio->max_rate[tc]) {
  91			rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
  92			rate_mbps = TO_MBPS(rate_mbps);
  93			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
  94							       common->bus_freq);
  95		}
  96
  97		writel(rate_mbps,
  98		       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
  99	}
 100}
 101
 102static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
 103					  struct tc_mqprio_qopt_offload *mqprio)
 104{
 105	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
 106	struct netlink_ext_ack *extack = mqprio->extack;
 107	u64 min_rate_total = 0, max_rate_total = 0;
 108	u32 min_rate_msk = 0, max_rate_msk = 0;
 109	bool has_min_rate, has_max_rate;
 110	int num_tc, i;
 111
 112	if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
 113		return 0;
 114
 115	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
 116		return 0;
 117
 118	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
 119	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
 120
 121	if (!has_min_rate && has_max_rate) {
 122		NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
 123		return -EOPNOTSUPP;
 124	}
 125
 126	if (!has_min_rate)
 127		return 0;
 128
 129	num_tc = mqprio->qopt.num_tc;
 130
 131	for (i = num_tc - 1; i >= 0; i--) {
 132		u32 ch_msk;
 133
 134		if (mqprio->min_rate[i])
 135			min_rate_msk |= BIT(i);
 136		min_rate_total +=  mqprio->min_rate[i];
 137
 138		if (has_max_rate) {
 139			if (mqprio->max_rate[i])
 140				max_rate_msk |= BIT(i);
 141			max_rate_total +=  mqprio->max_rate[i];
 142
 143			if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
 144				NL_SET_ERR_MSG_FMT_MOD(extack,
 145						       "TX tc%d rate max>0 but min=0",
 146						       i);
 147				return -EINVAL;
 148			}
 149
 150			if (mqprio->max_rate[i] &&
 151			    mqprio->max_rate[i] < mqprio->min_rate[i]) {
 152				NL_SET_ERR_MSG_FMT_MOD(extack,
 153						       "TX tc%d rate min(%llu)>max(%llu)",
 154						       i, mqprio->min_rate[i],
 155						       mqprio->max_rate[i]);
 156				return -EINVAL;
 157			}
 158		}
 159
 160		ch_msk = GENMASK(num_tc - 1, i);
 161		if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
 162			NL_SET_ERR_MSG_FMT_MOD(extack,
 163					       "Min rate must be set sequentially hi->lo tx_rate_msk%x",
 164					       min_rate_msk);
 165			return -EINVAL;
 166		}
 167
 168		if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
 169			NL_SET_ERR_MSG_FMT_MOD(extack,
 170					       "Max rate must be set sequentially hi->lo tx_rate_msk%x",
 171					       max_rate_msk);
 172			return -EINVAL;
 173		}
 174	}
 175
 176	min_rate_total = TO_MBPS(min_rate_total);
 177	max_rate_total = TO_MBPS(max_rate_total);
 178
 179	p_mqprio->shaper_en = true;
 180	p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);
 181
 182	return 0;
 183}
 184
 185static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
 186{
 187	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 188	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
 189
 190	p_mqprio->shaper_en = false;
 191	p_mqprio->max_rate_total = 0;
 192
 193	am65_cpsw_tx_pn_shaper_reset(port);
 194	netdev_reset_tc(ndev);
 195
 196	/* Reset all Queue priorities to 0 */
 197	writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
 198
 199	am65_cpsw_iet_change_preemptible_tcs(port, 0);
 200}
 201
 202static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
 203{
 204	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 205	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
 206	struct tc_mqprio_qopt_offload *mqprio = type_data;
 207	struct am65_cpsw_common *common = port->common;
 208	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
 209	int i, tc, offset, count, prio, ret;
 210	u8 num_tc = qopt->num_tc;
 211	u32 tx_prio_map = 0;
 212
 213	memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
 214
 215	ret = pm_runtime_get_sync(common->dev);
 216	if (ret < 0) {
 217		pm_runtime_put_noidle(common->dev);
 218		return ret;
 219	}
 220
 221	if (!num_tc) {
 222		am65_cpsw_reset_tc_mqprio(ndev);
 223		ret = 0;
 224		goto exit_put;
 225	}
 226
 227	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
 228	if (ret)
 229		goto exit_put;
 230
 231	netdev_set_num_tc(ndev, num_tc);
 232
 233	/* Multiple Linux priorities can map to a Traffic Class
 234	 * A Traffic Class can have multiple contiguous Queues,
 235	 * Queues get mapped to Channels (thread_id),
 236	 *	if not VLAN tagged, thread_id is used as packet_priority
 237	 *	if VLAN tagged. VLAN priority is used as packet_priority
 238	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
 239	 * header_priority gets mapped to switch_priority in pn_tx_pri_map.
 240	 * As p0_rx_pri_map is left at defaults (0x76543210), we can
 241	 * assume that Queue_n gets mapped to header_priority_n. We can then
 242	 * set the switch priority in pn_tx_pri_map.
 243	 */
 244
 245	for (tc = 0; tc < num_tc; tc++) {
 246		prio = tc;
 247
 248		/* For simplicity we assign the same priority (TCn) to
 249		 * all queues of a Traffic Class.
 250		 */
 251		for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
 252			tx_prio_map |= prio << (4 * i);
 253
 254		count = qopt->count[tc];
 255		offset = qopt->offset[tc];
 256		netdev_set_tc_queue(ndev, tc, count, offset);
 257	}
 258
 259	writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
 260
 261	am65_cpsw_tx_pn_shaper_apply(port);
 262	am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);
 263
 264exit_put:
 265	pm_runtime_put(common->dev);
 266
 267	return ret;
 268}
 269
 270static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
 271{
 272	int verify_time_ms = port->qos.iet.verify_time_ms;
 273	u32 val;
 274
 275	/* The number of wireside clocks contained in the verify
 276	 * timeout counter. The default is 0x1312d0
 277	 * (10ms at 125Mhz in 1G mode).
 278	 */
 279	val = 125 * HZ_PER_MHZ;	/* assuming 125MHz wireside clock */
 280
 281	val /= MILLIHZ_PER_HZ;		/* count per ms timeout */
 282	val *= verify_time_ms;		/* count for timeout ms */
 283
 284	if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
 285		return -EINVAL;
 286
 287	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);
 288
 289	return 0;
 290}
 291
 292static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
 293{
 294	u32 ctrl, status;
 295	int try;
 296
 297	try = 20;
 298	do {
 299		/* Reset the verify state machine by writing 1
 300		 * to LINKFAIL
 301		 */
 302		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 303		ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
 304		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 305
 306		/* Clear MAC_LINKFAIL bit to start Verify. */
 307		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 308		ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
 309		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 310
 311		msleep(port->qos.iet.verify_time_ms);
 312
 313		status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
 314		if (status & AM65_CPSW_PN_MAC_VERIFIED)
 315			return 0;
 316
 317		if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
 318			netdev_dbg(port->ndev,
 319				   "MAC Merge verify failed, trying again\n");
 320			continue;
 321		}
 322
 323		if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
 324			netdev_dbg(port->ndev, "MAC Merge respond error\n");
 325			return -ENODEV;
 326		}
 327
 328		if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
 329			netdev_dbg(port->ndev, "MAC Merge verify error\n");
 330			return -ENODEV;
 331		}
 332	} while (try-- > 0);
 333
 334	netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
 335	return -ETIMEDOUT;
 336}
 337
 338static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
 339{
 340	u32 val;
 341
 342	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 343	val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
 344	val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
 345	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 346}
 347
 348/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 349 * UAPI doesn't allow tx enable without rx enable.
 350 */
 351void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
 352{
 353	struct am65_cpsw_port *port;
 354	bool rx_enable = false;
 355	u32 val;
 356	int i;
 357
 358	for (i = 0; i < common->port_num; i++) {
 359		port = &common->ports[i];
 360		val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
 361		rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
 362		if (rx_enable)
 363			break;
 364	}
 365
 366	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
 367
 368	if (rx_enable)
 369		val |= AM65_CPSW_CTL_IET_EN;
 370	else
 371		val &= ~AM65_CPSW_CTL_IET_EN;
 372
 373	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
 374	common->iet_enabled = rx_enable;
 375}
 376
 377/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
 378 * (active/inactive), but the preemptible traffic classes should only be
 379 * committed to hardware once TX is active. Resort to polling.
 380 */
 381void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
 382{
 383	u8 preemptible_tcs;
 384	int err;
 385	u32 val;
 386
 387	if (port->qos.link_speed == SPEED_UNKNOWN)
 388		return;
 389
 390	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
 391	if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
 392		return;
 393
 394	/* update common IET enable */
 395	am65_cpsw_iet_common_enable(port->common);
 396
 397	/* update verify count */
 398	err = am65_cpsw_iet_set_verify_timeout_count(port);
 399	if (err) {
 400		netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
 401		return;
 402	}
 403
 404	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
 405	if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
 406		err = am65_cpsw_iet_verify_wait(port);
 407		if (err)
 408			return;
 409	}
 410
 411	preemptible_tcs = port->qos.iet.preemptible_tcs;
 412	am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
 413}
 414
 415static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
 416{
 417	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
 418
 419	port->qos.iet.preemptible_tcs = preemptible_tcs;
 420	mutex_lock(&priv->mm_lock);
 421	am65_cpsw_iet_commit_preemptible_tcs(port);
 422	mutex_unlock(&priv->mm_lock);
 423}
 424
 425static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
 426{
 427	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
 428	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 429
 430	mutex_lock(&priv->mm_lock);
 431	am65_cpsw_iet_commit_preemptible_tcs(port);
 432	mutex_unlock(&priv->mm_lock);
 433}
 434
 435static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
 436{
 437	return port->qos.est_oper || port->qos.est_admin;
 438}
 439
 440static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
 441{
 442	u32 val;
 443
 444	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
 445
 446	if (enable)
 447		val |= AM65_CPSW_CTL_EST_EN;
 448	else
 449		val &= ~AM65_CPSW_CTL_EST_EN;
 450
 451	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
 452	common->est_enabled = enable;
 453}
 454
 455static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
 456{
 457	u32 val;
 458
 459	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
 460	if (enable)
 461		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
 462	else
 463		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
 464
 465	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
 466}
 467
 468/* target new EST RAM buffer, actual toggle happens after cycle completion */
 469static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
 470					      int buf_num)
 471{
 472	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 473	u32 val;
 474
 475	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 476	if (buf_num)
 477		val |= AM65_CPSW_PN_EST_BUFSEL;
 478	else
 479		val &= ~AM65_CPSW_PN_EST_BUFSEL;
 480
 481	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 482}
 483
 484/* am65_cpsw_port_est_is_swapped() - Indicate if h/w is transitioned
 485 * admin -> oper or not
 486 *
 487 * Return true if already transitioned. i.e oper is equal to admin and buf
 488 * numbers match (est_oper->buf match with est_admin->buf).
 489 * false if before transition. i.e oper is not equal to admin, (i.e a
 490 * previous admin command is waiting to be transitioned to oper state
 491 * and est_oper->buf not match with est_oper->buf).
 492 */
 493static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
 494					 int *admin)
 495{
 496	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 497	u32 val;
 498
 499	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
 500	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
 501
 502	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 503	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
 504
 505	return *admin == *oper;
 506}
 507
 508/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
 509 * admin to program the new schedule.
 510 *
 511 * The logic is as follows:
 512 * If oper is the same as admin, return the other buffer (!oper) as the admin
 513 * buffer.  If they differ, the hardware is still transitioning from
 514 * admin -> oper, so let the current oper continue by selecting the same oper
 515 * buffer via the EST_BUFSEL bit in the EST CTL register.  On the second
 516 * iteration they will match and the code returns.  The actual buffer to
 517 * write the commands to is selected later, just before the schedule is
 518 * updated.
 519 */
 520static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
 521{
 522	int oper, admin;
 523	int roll = 2;
 524
 525	while (roll--) {
 526		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
 527			return !oper;
 528
 529		/* admin has not taken effect yet, so stall the transition, as
 530		 * in-flight memory must not be touched, by targeting the same oper buf.
 531		 */
 532		am65_cpsw_port_est_assign_buf_num(ndev, oper);
 533
 534		dev_info(&ndev->dev,
 535			 "Prev. EST admin cycle is in transit %d -> %d\n",
 536			 oper, admin);
 537	}
 538
 539	return admin;
 540}
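
/* Example walk-through (illustrative): if AM65_CPSW_PN_REG_FIFO_STATUS reports
 * buffer 1 as active (oper == 1) and EST_BUFSEL in EST_CTL also selects
 * buffer 1 (admin == 1), the buffers already match and !oper == 0 is returned
 * as the free admin buffer.  If they differ, the loop re-targets the current
 * oper buffer once and checks again, so at most two iterations are needed.
 */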
 541
 542static void am65_cpsw_admin_to_oper(struct net_device *ndev)
 543{
 544	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 545
 546	devm_kfree(&ndev->dev, port->qos.est_oper);
 547
 548	port->qos.est_oper = port->qos.est_admin;
 549	port->qos.est_admin = NULL;
 550}
 551
 552static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
 553					   struct am65_cpsw_est *est_new)
 554{
 555	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 556	u32 val;
 557
 558	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 559	val &= ~AM65_CPSW_PN_EST_ONEBUF;
 560	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
 561
 562	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
 563
 564	/* a rolled buf num means the buf changed while configuring */
 565	if (port->qos.est_oper && port->qos.est_admin &&
 566	    est_new->buf == port->qos.est_oper->buf)
 567		am65_cpsw_admin_to_oper(ndev);
 568}
 569
 570static void am65_cpsw_est_set(struct net_device *ndev, int enable)
 571{
 572	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 573	struct am65_cpsw_common *common = port->common;
 574	int common_enable = 0;
 575	int i;
 576
 577	am65_cpsw_port_est_enable(port, enable);
 578
 579	for (i = 0; i < common->port_num; i++)
 580		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
 581
 582	common_enable |= enable;
 583	am65_cpsw_est_enable(common, common_enable);
 584}
 585
 586/* This update is supposed to be called from any routine before reading the
 587 * real state of the admin -> oper transition, in particular from any generic
 588 * routine that reports the real state back to the taprio qdisc.
 589 */
 590static void am65_cpsw_est_update_state(struct net_device *ndev)
 591{
 592	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 593	int oper, admin;
 594
 595	if (!port->qos.est_admin)
 596		return;
 597
 598	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
 599		return;
 600
 601	am65_cpsw_admin_to_oper(ndev);
 602}
 603
 604/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
 605 * 10/100Mb mode. So, given the link speed and a time in ns, convert the time
 606 * into the number of bytes/nibbles that can be transmitted at that speed.
 607 */
 608static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
 609{
 610	u64 temp;
 611
 612	temp = ns * link_speed;
 613	if (link_speed < SPEED_1000)
 614		temp <<= 1;
 615
 616	return DIV_ROUND_UP(temp, 8 * 1000);
 617}
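
/* Worked example (illustrative values): at link_speed = 1000 a 12000 ns gate
 * interval gives DIV_ROUND_UP(12000 * 1000, 8 * 1000) = 1500 bytes; at
 * link_speed = 100 the intermediate value is doubled (nibble granularity), so
 * the same interval gives DIV_ROUND_UP(12000 * 100 * 2, 8 * 1000) = 300
 * nibbles.
 */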
 618
 619static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
 620						  int fetch_cnt,
 621						  int fetch_allow)
 622{
 623	u32 prio_mask, cmd_fetch_cnt, cmd;
 624
 625	do {
 626		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
 627			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
 628			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
 629		} else {
 630			cmd_fetch_cnt = fetch_cnt;
 631			/* fetch count can't be less than 16? */
 632			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
 633				cmd_fetch_cnt = 16;
 634
 635			fetch_cnt = 0;
 636		}
 637
 638		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
 639		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
 640
 641		writel(cmd, addr);
 642		addr += 4;
 643	} while (fetch_cnt);
 644
 645	return addr;
 646}
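
/* Example (illustrative): a fetch_cnt of 2.5 * AM65_CPSW_FETCH_CNT_MAX is
 * emitted as three commands - two carrying AM65_CPSW_FETCH_CNT_MAX and a last
 * one carrying the remainder (bumped up to 16 if non-zero but smaller), each
 * tagged with the same fetch_allow priority mask.
 */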
 647
 648static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
 649				      struct tc_taprio_qopt_offload *taprio,
 650				      int link_speed)
 651{
 652	int i, cmd_cnt, cmd_sum = 0;
 653	u32 fetch_cnt;
 654
 655	for (i = 0; i < taprio->num_entries; i++) {
 656		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
 657			dev_err(&ndev->dev, "Only SET command is supported");
 658			return -EINVAL;
 659		}
 660
 661		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
 662						   link_speed);
 663
 664		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
 665		if (!cmd_cnt)
 666			cmd_cnt++;
 667
 668		cmd_sum += cmd_cnt;
 669
 670		if (!fetch_cnt)
 671			break;
 672	}
 673
 674	return cmd_sum;
 675}
 676
 677static int am65_cpsw_est_check_scheds(struct net_device *ndev,
 678				      struct am65_cpsw_est *est_new)
 679{
 680	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 681	int cmd_num;
 682
 683	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
 684					     port->qos.link_speed);
 685	if (cmd_num < 0)
 686		return cmd_num;
 687
 688	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
 689		dev_err(&ndev->dev, "No fetch RAM");
 690		return -ENOMEM;
 691	}
 692
 693	return 0;
 694}
 695
 696static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
 697					 struct am65_cpsw_est *est_new)
 698{
 699	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 700	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
 701	void __iomem *ram_addr, *max_ram_addr;
 702	struct tc_taprio_sched_entry *entry;
 703	int i, ram_size;
 704
 705	ram_addr = port->fetch_ram_base;
 706	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
 707	ram_addr += est_new->buf * ram_size;
 708
 709	max_ram_addr = ram_size + ram_addr;
 710	for (i = 0; i < est_new->taprio.num_entries; i++) {
 711		entry = &est_new->taprio.entries[i];
 712
 713		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
 714						   port->qos.link_speed);
 715		fetch_allow = entry->gate_mask;
 716		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
 717			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
 718				fetch_allow);
 719
 720		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
 721							fetch_allow);
 722
 723		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
 724			dev_info(&ndev->dev,
 725				 "next scheds after %d have no impact", i + 1);
 726			break;
 727		}
 728
 729		all_fetch_allow |= fetch_allow;
 730	}
 731
 732	/* end cmd, enabling the non-timed queues in case the cycle time is exceeded */
 733	if (ram_addr < max_ram_addr)
 734		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
 735}
 736
 737/*
 738 * Enable ESTf periodic output, set cycle start time and interval.
 739 */
 740static int am65_cpsw_timer_set(struct net_device *ndev,
 741			       struct am65_cpsw_est *est_new)
 742{
 743	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 744	struct am65_cpsw_common *common = port->common;
 745	struct am65_cpts *cpts = common->cpts;
 746	struct am65_cpts_estf_cfg cfg;
 747
 748	cfg.ns_period = est_new->taprio.cycle_time;
 749	cfg.ns_start = est_new->taprio.base_time;
 750
 751	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
 752}
 753
 754static void am65_cpsw_timer_stop(struct net_device *ndev)
 755{
 756	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 757	struct am65_cpts *cpts = port->common->cpts;
 758
 759	am65_cpts_estf_disable(cpts, port->port_id - 1);
 760}
 761
 762static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
 763					  struct am65_cpsw_est *est_new)
 764{
 765	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
 766	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 767	struct am65_cpts *cpts = port->common->cpts;
 768	u64 cur_time;
 769	s64 diff;
 770
 771	if (!port->qos.est_oper)
 772		return TACT_PROG;
 773
 774	taprio_new = &est_new->taprio;
 775	taprio_oper = &port->qos.est_oper->taprio;
 776
 777	if (taprio_new->cycle_time != taprio_oper->cycle_time)
 778		return TACT_NEED_STOP;
 779
 780	/* in order to avoid a timer reset, get base_time from the oper taprio */
 781	if (!taprio_new->base_time && taprio_oper)
 782		taprio_new->base_time = taprio_oper->base_time;
 783
 784	if (taprio_new->base_time == taprio_oper->base_time)
 785		return TACT_SKIP_PROG;
 786
 787	/* base times are cycle synchronized */
 788	diff = taprio_new->base_time - taprio_oper->base_time;
 789	diff = diff < 0 ? -diff : diff;
 790	if (diff % taprio_new->cycle_time)
 791		return TACT_NEED_STOP;
 792
 793	cur_time = am65_cpts_ns_gettime(cpts);
 794	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
 795		return TACT_SKIP_PROG;
 796
 797	/* TODO: Admin schedule at future time is not currently supported */
 798	return TACT_NEED_STOP;
 799}
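
/* Worked example (illustrative values): with an oper base_time of 0 and a
 * cycle_time of 1000000 ns, a new base_time of 5000000 ns is
 * cycle-synchronized (5000000 % 1000000 == 0) and, as long as it is no more
 * than one cycle ahead of the current CPTS time, programming is skipped
 * (TACT_SKIP_PROG).  A new base_time of 5500000 ns is not cycle-synchronized
 * and returns TACT_NEED_STOP.
 */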
 800
 801static void am65_cpsw_stop_est(struct net_device *ndev)
 802{
 803	am65_cpsw_est_set(ndev, 0);
 804	am65_cpsw_timer_stop(ndev);
 805}
 806
 807static void am65_cpsw_taprio_destroy(struct net_device *ndev)
 808{
 809	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 810
 811	am65_cpsw_stop_est(ndev);
 812
 813	devm_kfree(&ndev->dev, port->qos.est_admin);
 814	devm_kfree(&ndev->dev, port->qos.est_oper);
 815
 816	port->qos.est_oper = NULL;
 817	port->qos.est_admin = NULL;
 818
 819	am65_cpsw_reset_tc_mqprio(ndev);
 820}
 821
 822static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
 823				struct tc_taprio_qopt_offload *to)
 824{
 825	int i;
 826
 827	*to = *from;
 828	for (i = 0; i < from->num_entries; i++)
 829		to->entries[i] = from->entries[i];
 830}
 831
 832static int am65_cpsw_taprio_replace(struct net_device *ndev,
 833				    struct tc_taprio_qopt_offload *taprio)
 834{
 835	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 836	struct netlink_ext_ack *extack = taprio->mqprio.extack;
 837	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 838	struct am65_cpts *cpts = common->cpts;
 839	struct am65_cpsw_est *est_new;
 840	int ret, tact;
 841
 842	if (!netif_running(ndev)) {
 843		NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
 844		return -ENETDOWN;
 845	}
 846
 847	if (common->pf_p0_rx_ptype_rrobin) {
 848		NL_SET_ERR_MSG_MOD(extack,
 849				   "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
 850		return -EINVAL;
 851	}
 852
 853	if (port->qos.link_speed == SPEED_UNKNOWN)
 854		return -ENOLINK;
 855
 856	if (taprio->cycle_time_extension) {
 857		NL_SET_ERR_MSG_MOD(extack,
 858				   "cycle time extension not supported");
 859		return -EOPNOTSUPP;
 860	}
 861
 862	est_new = devm_kzalloc(&ndev->dev,
 863			       struct_size(est_new, taprio.entries, taprio->num_entries),
 864			       GFP_KERNEL);
 865	if (!est_new)
 866		return -ENOMEM;
 867
 868	ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
 869	if (ret)
 870		return ret;
 871
 872	am65_cpsw_cp_taprio(taprio, &est_new->taprio);
 873
 874	am65_cpsw_est_update_state(ndev);
 875
 876	ret = am65_cpsw_est_check_scheds(ndev, est_new);
 877	if (ret < 0)
 878		goto fail;
 879
 880	tact = am65_cpsw_timer_act(ndev, est_new);
 881	if (tact == TACT_NEED_STOP) {
 882		NL_SET_ERR_MSG_MOD(extack,
 883				   "Can't toggle estf timer, stop taprio first");
 884		ret = -EINVAL;
 885		goto fail;
 886	}
 887
 888	if (tact == TACT_PROG)
 889		am65_cpsw_timer_stop(ndev);
 890
 891	if (!est_new->taprio.base_time)
 892		est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
 893
 894	am65_cpsw_port_est_get_buf_num(ndev, est_new);
 895	am65_cpsw_est_set_sched_list(ndev, est_new);
 896	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
 897
 898	am65_cpsw_est_set(ndev, 1);
 899
 900	if (tact == TACT_PROG) {
 901		ret = am65_cpsw_timer_set(ndev, est_new);
 902		if (ret) {
 903			NL_SET_ERR_MSG_MOD(extack,
 904					   "Failed to set cycle time");
 905			goto fail;
 906		}
 907	}
 908
 909	devm_kfree(&ndev->dev, port->qos.est_admin);
 910	port->qos.est_admin = est_new;
 911	am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);
 912
 913	return 0;
 914
 915fail:
 916	am65_cpsw_reset_tc_mqprio(ndev);
 917	devm_kfree(&ndev->dev, est_new);
 918	return ret;
 919}
 920
 921static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
 922{
 923	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 924	ktime_t cur_time;
 925	s64 delta;
 926
 927	if (!am65_cpsw_port_est_enabled(port))
 928		return;
 929
 930	if (port->qos.link_down_time) {
 931		cur_time = ktime_get();
 932		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
 933		if (delta > USEC_PER_SEC) {
 934			dev_err(&ndev->dev,
 935				"Link has been lost too long, stopping TAS");
 936			goto purge_est;
 937		}
 938	}
 939
 940	return;
 941
 942purge_est:
 943	am65_cpsw_taprio_destroy(ndev);
 944}
 945
 946static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
 947{
 948	struct tc_taprio_qopt_offload *taprio = type_data;
 949	int err = 0;
 950
 951	switch (taprio->cmd) {
 952	case TAPRIO_CMD_REPLACE:
 953		err = am65_cpsw_taprio_replace(ndev, taprio);
 954		break;
 955	case TAPRIO_CMD_DESTROY:
 956		am65_cpsw_taprio_destroy(ndev);
 957		break;
 958	default:
 959		err = -EOPNOTSUPP;
 960	}
 961
 962	return err;
 963}
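
/* Illustrative only, not part of the driver: a full-offload schedule that
 * exercises the TAPRIO_CMD_REPLACE path above could be installed from
 * userspace roughly as follows (device name, queue layout and times are
 * assumptions):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 3 \
 *           map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 1@2 \
 *           base-time 1000000000 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           sched-entry S 04 400000 \
 *           flags 0x2
 *
 * flags 0x2 requests full offload, which is what reaches
 * am65_cpsw_taprio_replace(); deleting the qdisc takes the TAPRIO_CMD_DESTROY
 * path instead.
 */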
 964
 965static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
 966{
 967	struct tc_query_caps_base *base = type_data;
 968
 969	switch (base->type) {
 970	case TC_SETUP_QDISC_MQPRIO: {
 971		struct tc_mqprio_caps *caps = base->caps;
 972
 973		caps->validate_queue_counts = true;
 974
 975		return 0;
 976	}
 977
 978	case TC_SETUP_QDISC_TAPRIO: {
 979		struct tc_taprio_caps *caps = base->caps;
 980
 981		caps->gate_mask_per_txq = true;
 982
 983		return 0;
 984	}
 985	default:
 986		return -EOPNOTSUPP;
 987	}
 988}
 989
 990static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
 991					       struct netlink_ext_ack *extack,
 992					       struct flow_cls_offload *cls,
 993					       u64 rate_pkt_ps)
 994{
 995	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 996	struct flow_dissector *dissector = rule->match.dissector;
 997	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
 998	struct am65_cpsw_qos *qos = &port->qos;
 999	struct flow_match_eth_addrs match;
1000	int ret;
1001
1002	if (dissector->used_keys &
1003	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
1004	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
1005	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
1006		NL_SET_ERR_MSG_MOD(extack,
1007				   "Unsupported keys used");
1008		return -EOPNOTSUPP;
1009	}
1010
1011	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1012		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
1013		return -EOPNOTSUPP;
1014	}
1015
1016	flow_rule_match_eth_addrs(rule, &match);
1017
1018	if (!is_zero_ether_addr(match.mask->src)) {
1019		NL_SET_ERR_MSG_MOD(extack,
1020				   "Matching on source MAC not supported");
1021		return -EOPNOTSUPP;
1022	}
1023
1024	if (is_broadcast_ether_addr(match.key->dst) &&
1025	    is_broadcast_ether_addr(match.mask->dst)) {
1026		ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
1027		if (ret)
1028			return ret;
1029
1030		qos->ale_bc_ratelimit.cookie = cls->cookie;
1031		qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
1032	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
1033		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
1034		ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
1035		if (ret)
1036			return ret;
1037
1038		qos->ale_mc_ratelimit.cookie = cls->cookie;
1039		qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
1040	} else {
1041		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
1042		return -EOPNOTSUPP;
1043	}
1044
1045	return 0;
1046}
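
/* Illustrative only, not part of the driver: a flower rule with a
 * packet-per-second policer that passes the checks above could look like this
 * (device name and rates are assumptions):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress flower skip_sw \
 *           dst_mac ff:ff:ff:ff:ff:ff \
 *           action police pkts_rate 1000 pkts_burst 32 conform-exceed drop/pipe
 *
 * Matching dst_mac 01:00:00:00:00:00/01:00:00:00:00:00 instead selects the
 * multicast rate limiter applied via cpsw_ale_rx_ratelimit_mc().
 */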
1047
1048static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
1049						    const struct flow_action_entry *act,
1050						    struct netlink_ext_ack *extack)
1051{
1052	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
1053		NL_SET_ERR_MSG_MOD(extack,
1054				   "Offload not supported when exceed action is not drop");
1055		return -EOPNOTSUPP;
1056	}
1057
1058	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
1059	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
1060		NL_SET_ERR_MSG_MOD(extack,
1061				   "Offload not supported when conform action is not pipe or ok");
1062		return -EOPNOTSUPP;
1063	}
1064
1065	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
1066	    !flow_action_is_last_entry(action, act)) {
1067		NL_SET_ERR_MSG_MOD(extack,
1068				   "Offload not supported when conform action is ok, but action is not last");
1069		return -EOPNOTSUPP;
1070	}
1071
1072	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
1073	    act->police.avrate || act->police.overhead) {
1074		NL_SET_ERR_MSG_MOD(extack,
1075				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
1076		return -EOPNOTSUPP;
1077	}
1078
1079	return 0;
1080}
1081
1082static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
1083					     struct flow_cls_offload *cls)
1084{
1085	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
1086	struct netlink_ext_ack *extack = cls->common.extack;
1087	const struct flow_action_entry *act;
1088	int i, ret;
1089
1090	flow_action_for_each(i, act, &rule->action) {
1091		switch (act->id) {
1092		case FLOW_ACTION_POLICE:
1093			ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
1094			if (ret)
1095				return ret;
1096
1097			return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
1098								   act->police.rate_pkt_ps);
1099		default:
1100			NL_SET_ERR_MSG_MOD(extack,
1101					   "Action not supported");
1102			return -EOPNOTSUPP;
1103		}
1104	}
1105	return -EOPNOTSUPP;
1106}
1107
1108static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
1109{
1110	struct am65_cpsw_qos *qos = &port->qos;
1111
1112	if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
1113		qos->ale_bc_ratelimit.cookie = 0;
1114		qos->ale_bc_ratelimit.rate_packet_ps = 0;
1115		cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
1116	}
1117
1118	if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
1119		qos->ale_mc_ratelimit.cookie = 0;
1120		qos->ale_mc_ratelimit.rate_packet_ps = 0;
1121		cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
1122	}
1123
1124	return 0;
1125}
1126
1127static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
1128					    struct flow_cls_offload *cls_flower)
1129{
1130	switch (cls_flower->command) {
1131	case FLOW_CLS_REPLACE:
1132		return am65_cpsw_qos_configure_clsflower(port, cls_flower);
1133	case FLOW_CLS_DESTROY:
1134		return am65_cpsw_qos_delete_clsflower(port, cls_flower);
1135	default:
1136		return -EOPNOTSUPP;
1137	}
1138}
1139
1140static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1141{
1142	struct am65_cpsw_port *port = cb_priv;
1143
1144	if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
1145		return -EOPNOTSUPP;
1146
1147	switch (type) {
1148	case TC_SETUP_CLSFLOWER:
1149		return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
1150	default:
1151		return -EOPNOTSUPP;
1152	}
1153}
1154
1155static LIST_HEAD(am65_cpsw_qos_block_cb_list);
1156
1157static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
1158{
1159	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1160
1161	return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
1162					  am65_cpsw_qos_setup_tc_block_cb,
1163					  port, port, true);
1164}
1165
1166static void
1167am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
1168			       int tx_ch, u32 rate_mbps)
1169{
1170	struct am65_cpsw_host *host = am65_common_get_host(common);
1171	u32 ch_cir;
1172	int i;
1173
1174	ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
1175	writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
1176
1177	/* update the rate for this tx queue on every port */
1178	for (i = 0; i < common->port_num; i++) {
1179		struct net_device *ndev = common->ports[i].ndev;
1180
1181		if (!ndev)
1182			continue;
1183		netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
1184	}
1185}
1186
1187int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
1188					int queue, u32 rate_mbps)
1189{
1190	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1191	struct am65_cpsw_common *common = port->common;
1192	struct am65_cpsw_tx_chn *tx_chn;
1193	u32 ch_rate, tx_ch_rate_msk_new;
1194	u32 ch_msk = 0;
1195	int ret;
1196
1197	dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
1198		queue, rate_mbps, common->tx_ch_rate_msk);
1199
1200	if (common->pf_p0_rx_ptype_rrobin) {
1201		dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
1202		return -EINVAL;
1203	}
1204
1205	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
1206	if (ch_rate == rate_mbps)
1207		return 0;
1208
1209	ret = pm_runtime_get_sync(common->dev);
1210	if (ret < 0) {
1211		pm_runtime_put_noidle(common->dev);
1212		return ret;
1213	}
1214	ret = 0;
1215
1216	tx_ch_rate_msk_new = common->tx_ch_rate_msk;
1217	if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
1218		tx_ch_rate_msk_new |= BIT(queue);
1219		ch_msk = GENMASK(common->tx_ch_num - 1, queue);
1220		ch_msk = tx_ch_rate_msk_new ^ ch_msk;
1221	} else if (!rate_mbps) {
1222		tx_ch_rate_msk_new &= ~BIT(queue);
1223		ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
1224		ch_msk = tx_ch_rate_msk_new & ch_msk;
1225	}
1226
1227	if (ch_msk) {
1228		dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
1229			common->tx_ch_rate_msk, tx_ch_rate_msk_new);
1230		ret = -EINVAL;
1231		goto exit_put;
1232	}
1233
1234	tx_chn = &common->tx_chns[queue];
1235	tx_chn->rate_mbps = rate_mbps;
1236	common->tx_ch_rate_msk = tx_ch_rate_msk_new;
1237
1238	if (!common->usage_count)
1239		/* will be applied on next netif up */
1240		goto exit_put;
1241
1242	am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);
1243
1244exit_put:
1245	pm_runtime_put(common->dev);
1246	return ret;
1247}
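
/* Illustrative only, not part of the driver: this NDO is normally reached via
 * the per-queue tx_maxrate sysfs attribute (value in Mbps).  Because of the
 * hi->lo ordering check above, limits have to be applied starting from the
 * highest-numbered TX queue, e.g. on a setup with eight TX channels (device
 * name and rates are assumptions):
 *
 *   echo 100 > /sys/class/net/eth0/queues/tx-7/tx_maxrate
 *   echo 200 > /sys/class/net/eth0/queues/tx-6/tx_maxrate
 *
 * Writing 0 removes the limit again, starting from the lowest-numbered queue
 * that still has one.
 */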
1248
1249void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
1250{
1251	struct am65_cpsw_host *host = am65_common_get_host(common);
1252	int tx_ch;
1253
1254	for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
1255		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
1256		u32 ch_cir;
1257
1258		if (!tx_chn->rate_mbps)
1259			continue;
1260
1261		ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
1262						    common->bus_freq);
1263		writel(ch_cir,
1264		       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
1265	}
1266}
1267
1268int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1269			       void *type_data)
1270{
1271	switch (type) {
1272	case TC_QUERY_CAPS:
1273		return am65_cpsw_tc_query_caps(ndev, type_data);
1274	case TC_SETUP_QDISC_TAPRIO:
1275		return am65_cpsw_setup_taprio(ndev, type_data);
1276	case TC_SETUP_QDISC_MQPRIO:
1277		return am65_cpsw_setup_mqprio(ndev, type_data);
1278	case TC_SETUP_BLOCK:
1279		return am65_cpsw_qos_setup_tc_block(ndev, type_data);
1280	default:
1281		return -EOPNOTSUPP;
1282	}
1283}
1284
1285void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
1286{
1287	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1288
1289	port->qos.link_speed = link_speed;
1290	am65_cpsw_tx_pn_shaper_apply(port);
1291	am65_cpsw_iet_link_state_update(ndev);
1292
1293	am65_cpsw_est_link_up(ndev, link_speed);
1294	port->qos.link_down_time = 0;
1295}
1296
1297void am65_cpsw_qos_link_down(struct net_device *ndev)
1298{
1299	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1300
1301	port->qos.link_speed = SPEED_UNKNOWN;
1302	am65_cpsw_tx_pn_shaper_apply(port);
1303	am65_cpsw_iet_link_state_update(ndev);
1304
1305	if (!port->qos.link_down_time)
1306		port->qos.link_down_time = ktime_get();
1307}