Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
   4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
   5 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
   6 * Copyright (c) 2016 John Crispin <john@phrozen.org>
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/phy.h>
  11#include <linux/netdevice.h>
  12#include <linux/bitfield.h>
  13#include <linux/regmap.h>
  14#include <net/dsa.h>
  15#include <linux/of_net.h>
  16#include <linux/of_mdio.h>
  17#include <linux/of_platform.h>
  18#include <linux/mdio.h>
  19#include <linux/phylink.h>
  20#include <linux/gpio/consumer.h>
  21#include <linux/etherdevice.h>
  22#include <linux/dsa/tag_qca.h>
  23
  24#include "qca8k.h"
  25#include "qca8k_leds.h"
  26
  27static void
  28qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
  29{
  30	regaddr >>= 1;
  31	*r1 = regaddr & 0x1e;
  32
  33	regaddr >>= 5;
  34	*r2 = regaddr & 0x7;
  35
  36	regaddr >>= 3;
  37	*page = regaddr & 0x3ff;
  38}
  39
  40static int
  41qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
  42{
  43	int ret;
  44	u16 lo;
  45
  46	lo = val & 0xffff;
  47	ret = bus->write(bus, phy_id, regnum, lo);
  48	if (ret < 0)
  49		dev_err_ratelimited(&bus->dev,
  50				    "failed to write qca8k 32bit lo register\n");
  51
  52	return ret;
  53}
  54
  55static int
  56qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
  57{
  58	int ret;
  59	u16 hi;
  60
  61	hi = (u16)(val >> 16);
  62	ret = bus->write(bus, phy_id, regnum, hi);
  63	if (ret < 0)
  64		dev_err_ratelimited(&bus->dev,
  65				    "failed to write qca8k 32bit hi register\n");
  66
  67	return ret;
  68}
  69
  70static int
  71qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
  72{
  73	int ret;
  74
  75	ret = bus->read(bus, phy_id, regnum);
  76	if (ret < 0)
  77		goto err;
  78
  79	*val = ret & 0xffff;
  80	return 0;
  81
  82err:
  83	dev_err_ratelimited(&bus->dev,
  84			    "failed to read qca8k 32bit lo register\n");
  85	*val = 0;
  86
  87	return ret;
  88}
  89
  90static int
  91qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
  92{
  93	int ret;
  94
  95	ret = bus->read(bus, phy_id, regnum);
  96	if (ret < 0)
  97		goto err;
  98
  99	*val = ret << 16;
 100	return 0;
 101
 102err:
 103	dev_err_ratelimited(&bus->dev,
 104			    "failed to read qca8k 32bit hi register\n");
 105	*val = 0;
 106
 107	return ret;
 108}
 109
 110static int
 111qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
 112{
 113	u32 hi, lo;
 114	int ret;
 115
 116	*val = 0;
 117
 118	ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
 119	if (ret < 0)
 120		goto err;
 121
 122	ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
 123	if (ret < 0)
 124		goto err;
 125
 126	*val = lo | hi;
 127
 128err:
 129	return ret;
 130}
 131
 132static void
 133qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
 134{
 135	if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
 136		return;
 137
 138	qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
 139}
 140
static int
qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
	/* Select the register page used by the split 16-bit MDIO accesses.
	 * The last programmed page is cached so redundant bus writes (and
	 * the settle delay below) are skipped.
	 */
	u16 *cached_page = &priv->mdio_cache.page;
	struct mii_bus *bus = priv->bus;
	int ret;

	if (page == *cached_page)
		return 0;

	/* The page register lives at pseudo PHY address 0x18, register 0 */
	ret = bus->write(bus, 0x18, 0, page);
	if (ret < 0) {
		dev_err_ratelimited(&bus->dev,
				    "failed to set qca8k page\n");
		return ret;
	}

	*cached_page = page;
	/* Give the switch time to latch the new page */
	usleep_range(1000, 2000);
	return 0;
}
 162
/* Rx handler for management-protocol ack packets coming back from the
 * switch. Validates the sequence number against the outstanding request,
 * copies read data into mgmt_eth_data->data and wakes the waiter blocked
 * on rw_done.
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u32 command;
	u8 len, cmd;
	int i;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	command = get_unaligned_le32(&mgmt_ethhdr->command);
	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);

	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
	/* Special case for len of 15 as this is the max value for len and needs to
	 * be increased before converting it from word to dword.
	 */
	if (len == 15)
		len++;

	/* We can ignore odd value, we always round up them in the alloc function. */
	len *= sizeof(u16);

	/* Make sure the seq match the requested packet */
	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		u32 *val = mgmt_eth_data->data;

		*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);

		/* Get the rest of the 12 byte of data.
		 * The read/write function will extract the requested data.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN) {
			__le32 *data2 = (__le32 *)skb->data;
			int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
					     len - QCA_HDR_MGMT_DATA1_LEN);

			val++;

			/* Extra payload words follow the header in skb->data */
			for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
				*val = get_unaligned_le32(data2);
				val++;
				data2++;
			}
		}
	}

	/* Wake whoever is waiting in the eth read/write path */
	complete(&mgmt_eth_data->rw_done);
}
 217
/* Build an skb carrying a single mgmt read/write request.
 *
 * @cmd: MDIO_READ or MDIO_WRITE
 * @reg: switch register address
 * @val: value(s) to write; the first u32 goes in the header, any
 *       remainder into the payload. Ignored for reads.
 * @priority: QCA_HDR_XMIT_PRIORITY value for the tag header
 * @len: requested transfer length in bytes
 *
 * Returns the allocated skb, or NULL on allocation failure. The sequence
 * number is NOT set here; see qca8k_mdio_header_fill_seq_num().
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	__le32 *data2;
	u32 command;
	u16 hdr;
	int i;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* Hdr mgmt length value is in step of word size.
	 * As an example to process 4 byte of data the correct length to set is 2.
	 * To process 8 byte 4, 12 byte 6, 16 byte 8...
	 *
	 * Odd values will always return the next size on the ack packet.
	 * (length of 3 (6 byte) will always return 8 bytes of data)
	 *
	 * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
	 * of data.
	 *
	 * To correctly calculate the length we divide the requested len by word and
	 * round up.
	 * On the ack function we can skip the odd check as we already handle the
	 * case here.
	 */
	real_len = DIV_ROUND_UP(len, sizeof(u16));

	/* We check if the result len is odd and we round up another time to
	 * the next size. (length of 3 will be increased to 4 as switch will always
	 * return 8 bytes)
	 */
	if (real_len % sizeof(u16) != 0)
		real_len++;

	/* Max reg value is 0xf(15) but switch will always return the next size (32 byte) */
	if (real_len == 16)
		real_len--;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	/* Compose the QCA tag header for a register read/write packet */
	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
					   QCA_HDR_MGMT_CHECK_CODE_VAL);

	put_unaligned_le32(command, &mgmt_ethhdr->command);

	if (cmd == MDIO_WRITE)
		put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);

	mgmt_ethhdr->hdr = htons(hdr);

	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
		int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
				     len - QCA_HDR_MGMT_DATA1_LEN);

		val++;

		/* Copy the remaining write payload after the header word */
		for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
			put_unaligned_le32(*val, data2);
			data2++;
			val++;
		}
	}

	return skb;
}
 301
 302static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
 303{
 304	struct qca_mgmt_ethhdr *mgmt_ethhdr;
 305	u32 seq;
 306
 307	seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
 308	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
 309	put_unaligned_le32(seq, &mgmt_ethhdr->seq);
 310}
 311
/* Read @len bytes starting at switch register @reg through the mgmt
 * Ethernet interface. Sleeps until the switch acks (or times out).
 * Returns 0 on success, -ENOMEM/-EINVAL/-ETIMEDOUT on failure.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_conduit interface is operational */
	if (!priv->mgmt_conduit) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_conduit;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* Copy out the data unconditionally; validity is decided below by
	 * the timeout/ack checks.
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
 363
/* Write @len bytes from @val starting at switch register @reg through
 * the mgmt Ethernet interface. Sleeps until the switch acks (or times
 * out). Returns 0 on success, -ENOMEM/-EINVAL/-ETIMEDOUT on failure.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_conduit interface is operational */
	if (!priv->mgmt_conduit) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_conduit;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
 411
 412static int
 413qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
 414{
 415	u32 val = 0;
 416	int ret;
 417
 418	ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
 419	if (ret)
 420		return ret;
 421
 422	val &= ~mask;
 423	val |= write_val;
 424
 425	return qca8k_write_eth(priv, reg, &val, sizeof(val));
 426}
 427
 428static int
 429qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
 430{
 
 431	struct mii_bus *bus = priv->bus;
 432	u16 r1, r2, page;
 433	int ret;
 434
 
 
 
 435	qca8k_split_addr(reg, &r1, &r2, &page);
 436
 437	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
 438
 439	ret = qca8k_set_page(priv, page);
 440	if (ret < 0)
 441		goto exit;
 442
 443	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
 444
 445exit:
 446	mutex_unlock(&bus->mdio_lock);
 447	return ret;
 448}
 449
 450static int
 451qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
 452{
 
 453	struct mii_bus *bus = priv->bus;
 454	u16 r1, r2, page;
 455	int ret;
 456
 
 
 
 457	qca8k_split_addr(reg, &r1, &r2, &page);
 458
 459	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
 460
 461	ret = qca8k_set_page(priv, page);
 462	if (ret < 0)
 463		goto exit;
 464
 465	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
 466
 467exit:
 468	mutex_unlock(&bus->mdio_lock);
 469	return ret;
 470}
 471
 472static int
 473qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
 474			     uint32_t mask, uint32_t write_val)
 475{
 
 476	struct mii_bus *bus = priv->bus;
 477	u16 r1, r2, page;
 478	u32 val;
 479	int ret;
 480
 
 
 
 481	qca8k_split_addr(reg, &r1, &r2, &page);
 482
 483	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
 484
 485	ret = qca8k_set_page(priv, page);
 486	if (ret < 0)
 487		goto exit;
 488
 489	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
 490	if (ret < 0)
 491		goto exit;
 492
 493	val &= ~mask;
 494	val |= write_val;
 495	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
 496
 497exit:
 498	mutex_unlock(&bus->mdio_lock);
 499
 500	return ret;
 501}
 502
 503static int
 504qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
 505		void *val_buf, size_t val_len)
 506{
 507	int i, count = val_len / sizeof(u32), ret;
 508	struct qca8k_priv *priv = ctx;
 509	u32 reg = *(u16 *)reg_buf;
 510
 511	if (priv->mgmt_conduit &&
 512	    !qca8k_read_eth(priv, reg, val_buf, val_len))
 513		return 0;
 514
 515	/* loop count times and increment reg of 4 */
 516	for (i = 0; i < count; i++, reg += sizeof(u32)) {
 517		ret = qca8k_read_mii(priv, reg, val_buf + i);
 518		if (ret < 0)
 519			return ret;
 520	}
 521
 522	return 0;
 523}
 524
 525static int
 526qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
 527			const void *val_buf, size_t val_len)
 528{
 529	int i, count = val_len / sizeof(u32), ret;
 530	struct qca8k_priv *priv = ctx;
 531	u32 reg = *(u16 *)reg_buf;
 532	u32 *val = (u32 *)val_buf;
 533
 534	if (priv->mgmt_conduit &&
 535	    !qca8k_write_eth(priv, reg, val, val_len))
 536		return 0;
 537
 538	/* loop count times, increment reg of 4 and increment val ptr to
 539	 * the next value
 540	 */
 541	for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
 542		ret = qca8k_write_mii(priv, reg, *val);
 543		if (ret < 0)
 544			return ret;
 545	}
 546
 547	return 0;
 548}
 549
 550static int
 551qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
 552{
 553	return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
 554				       bytes - sizeof(u16));
 555}
 556
 557static int
 558qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
 559{
 560	struct qca8k_priv *priv = ctx;
 561
 562	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
 563		return 0;
 564
 565	return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
 566}
 567
/* regmap backing the whole switch register space. All serialization is
 * done inside the qca8k read/write helpers, hence disable_locking.
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.read = qca8k_bulk_read,
	.write = qca8k_bulk_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* Locking is handled by qca8k read/write */
	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
	.max_raw_read = 32, /* mgmt eth can read up to 8 registers at time */
	/* ATU regs suffer from a bug where some data are not correctly
	 * written. Disable bulk write to correctly write ATU entry.
	 */
	.use_single_write = true,
};
 585
/* Poll helper for qca8k_phy_eth_command(): resend a copy of @read_skb
 * (with a fresh sequence number) and report the first returned data word
 * in *val. Caller holds mgmt_eth_data->mutex.
 */
static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
			struct sk_buff *read_skb, u32 *val)
{
	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
	bool ack;
	int ret;

	if (!skb)
		return -ENOMEM;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the copy pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	/* NOTE(review): qca8k_read_eth()/qca8k_write_eth() wrap this
	 * timeout in msecs_to_jiffies() while here (and in
	 * qca8k_phy_eth_command()) the raw constant is passed as jiffies
	 * — confirm which unit QCA8K_ETHERNET_TIMEOUT is meant to carry.
	 */
	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	*val = mgmt_eth_data->data[0];

	return 0;
}
 621
/* Access an internal PHY register by driving the switch's MDIO master
 * through the mgmt Ethernet interface.
 *
 * @read: true for a read, false for a write
 * @phy: internal PHY address
 * @regnum: PHY register, must be below QCA8K_MDIO_MASTER_MAX_REG
 * @data: value to write (ignored for reads)
 *
 * Returns the masked read value, 0 on a successful write, or a negative
 * error code. Takes both the MDIO bus lock and the mgmt data mutex; the
 * MDIO master is always cleared on exit, even after an error.
 */
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_conduit;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* It seems that accessing the switch's internal PHYs via management
	 * packets still uses the MDIO bus within the switch internally, and
	 * these accesses can conflict with external MDIO accesses to other
	 * devices on the MDIO bus.
	 * We therefore need to lock the MDIO bus onto which the switch is
	 * connected.
	 */
	mutex_lock(&priv->bus->mdio_lock);

	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy Wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if mgmt_conduit is operational */
	mgmt_conduit = priv->mgmt_conduit;
	if (!mgmt_conduit) {
		mutex_unlock(&mgmt_eth_data->mutex);
		mutex_unlock(&priv->bus->mdio_lock);
		ret = -EINVAL;
		goto err_mgmt_conduit;
	}

	read_skb->dev = mgmt_conduit;
	clear_skb->dev = mgmt_conduit;
	write_skb->dev = mgmt_conduit;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(write_skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	/* Step 2: poll until the MDIO master BUSY bit clears */
	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		/* Step 3: fetch the data word produced by the MDIO master */
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		kfree_skb(read_skb);
	}
exit:
	/* Step 4: always reset the MDIO master, even after an error */
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);
	mutex_unlock(&priv->bus->mdio_lock);

	return ret;

	/* Error handling before lock */
err_mgmt_conduit:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}
 792
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Poll the high half of @reg until the bits in @mask clear or
 * QCA8K_BUSY_WAIT_TIMEOUT expires. The caller holds the MDIO lock and
 * has already selected the correct page.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1 + 1, &val);

	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
 814
/* Write @data to internal PHY @phy register @regnum by programming the
 * switch MDIO master over the legacy paged MDIO path.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	/* Kick off the MDIO master transaction */
	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
 852
/* Read internal PHY @phy register @regnum by programming the switch MDIO
 * master over the legacy paged MDIO path. Returns the masked 16-bit
 * value or a negative error code.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	/* The command/flags live in the high half only */
	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	/* The read result is in the low half */
	ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
 896
 897static int
 898qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
 899{
 900	struct qca8k_priv *priv = slave_bus->priv;
 901	int ret;
 902
 903	/* Use mdio Ethernet when available, fallback to legacy one on error */
 904	ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
 905	if (!ret)
 906		return 0;
 907
 908	return qca8k_mdio_write(priv, phy, regnum, data);
 909}
 910
 911static int
 912qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
 913{
 914	struct qca8k_priv *priv = slave_bus->priv;
 915	int ret;
 916
 917	/* Use mdio Ethernet when available, fallback to legacy one on error */
 918	ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
 919	if (ret >= 0)
 920		return ret;
 921
 922	ret = qca8k_mdio_read(priv, phy, regnum);
 923
 924	if (ret < 0)
 925		return 0xffff;
 926
 927	return ret;
 928}
 929
 930static int
 931qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
 932{
 933	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
 934
 935	return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
 936}
 937
 938static int
 939qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
 940{
 941	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
 942
 943	return qca8k_internal_mdio_read(slave_bus, port, regnum);
 944}
 945
 946static int
 947qca8k_mdio_register(struct qca8k_priv *priv)
 948{
 949	struct dsa_switch *ds = priv->ds;
 950	struct device *dev = ds->dev;
 951	struct device_node *mdio;
 952	struct mii_bus *bus;
 953	int err = 0;
 954
 955	mdio = of_get_child_by_name(dev->of_node, "mdio");
 956	if (mdio && !of_device_is_available(mdio))
 957		goto out;
 958
 959	bus = devm_mdiobus_alloc(dev);
 960	if (!bus) {
 961		err = -ENOMEM;
 962		goto out_put_node;
 963	}
 964
 965	priv->internal_mdio_bus = bus;
 966	bus->priv = (void *)priv;
 967	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
 968		 ds->dst->index, ds->index);
 969	bus->parent = dev;
 970
 971	if (mdio) {
 972		/* Check if the device tree declares the port:phy mapping */
 973		bus->name = "qca8k user mii";
 
 
 
 974		bus->read = qca8k_internal_mdio_read;
 975		bus->write = qca8k_internal_mdio_write;
 976	} else {
 977		/* If a mapping can't be found, the legacy mapping is used,
 978		 * using qca8k_port_to_phy()
 979		 */
 980		ds->user_mii_bus = bus;
 981		bus->phy_mask = ~ds->phys_mii_mask;
 982		bus->name = "qca8k-legacy user mii";
 983		bus->read = qca8k_legacy_mdio_read;
 984		bus->write = qca8k_legacy_mdio_write;
 985	}
 986
 987	err = devm_of_mdiobus_register(dev, bus, mdio);
 988
 989out_put_node:
 990	of_node_put(mdio);
 991out:
 992	return err;
 
 993}
 994
/* Decide, from the device tree, whether the internal PHYs are reached
 * through the switch's own MDIO master or through an external MDIO bus,
 * then apply that configuration. The two modes are mutually exclusive.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	/* Classify each user port as internally or externally connected */
	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		/* A phy-handle with a non-internal mode means an external bus */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */

		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}
1063
1064static int
1065qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1066{
1067	u32 mask = 0;
1068	int ret = 0;
1069
1070	/* SoC specific settings for ipq8064.
1071	 * If more device require this consider adding
1072	 * a dedicated binding.
1073	 */
1074	if (of_machine_is_compatible("qcom,ipq8064"))
1075		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1076
1077	/* SoC specific settings for ipq8065 */
1078	if (of_machine_is_compatible("qcom,ipq8065"))
1079		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1080
1081	if (mask) {
1082		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1083				QCA8K_MAC_PWR_RGMII0_1_8V |
1084				QCA8K_MAC_PWR_RGMII1_1_8V,
1085				mask);
1086	}
1087
1088	return ret;
1089}
1090
1091static int qca8k_find_cpu_port(struct dsa_switch *ds)
1092{
1093	struct qca8k_priv *priv = ds->priv;
1094
1095	/* Find the connected cpu port. Valid port are 0 or 6 */
1096	if (dsa_is_cpu_port(ds, 0))
1097		return 0;
1098
1099	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1100
1101	if (dsa_is_cpu_port(ds, 6))
1102		return 6;
1103
1104	return -EINVAL;
1105}
1106
/* Program the power-on-strapping (PWS) register from DT properties:
 * package selection for QCA8327 and the optional power-on-sel /
 * LED-open-drain overrides.
 */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 requires the correct package mode to be set.
	 * Its bigger brother QCA8328 has the 172 pin layout.
	 * Should be applied by default but we set this just to make sure.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		/* Set the correct package of 148 pin for QCA8327 */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	if (of_property_read_bool(node, "qca,led-open-drain")) {
		/* Open-drain LEDs only work with the strapping override set */
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			val);
}
1146
/* Parse the device-tree configuration of the two possible CPU ports
 * (0 and 6) and cache the RGMII delays and SGMII clock/PLL options in
 * priv->ports_config for later use by phylink mac_config.
 *
 * Ports with no DT node, a disabled node, or an unsupported phy-mode are
 * skipped silently. Always returns 0.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* We have 2 CPU port. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* cpu_port_index counts CPU-capable ports (0 -> index 0,
		 * 6 -> index 1) and indexes the ports_config arrays.
		 */
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			/* Delay precedence: explicit DT property first,
			 * otherwise the phy-mode implies a default delay.
			 */
			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				/* PLL is known-broken on QCA8327; override the
				 * DT request rather than fail the probe.
				 */
				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
1244
1245static void
1246qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1247				      u32 reg)
1248{
1249	u32 delay, val = 0;
1250	int ret;
1251
1252	/* Delay can be declared in 3 different way.
1253	 * Mode to rgmii and internal-delay standard binding defined
1254	 * rgmii-id or rgmii-tx/rx phy mode set.
1255	 * The parse logic set a delay different than 0 only when one
1256	 * of the 3 different way is used. In all other case delay is
1257	 * not enabled. With ID or TX/RXID delay is enabled and set
1258	 * to the default and recommended value.
1259	 */
1260	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1261		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1262
1263		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1264			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1265	}
1266
1267	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1268		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1269
1270		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1271			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1272	}
1273
1274	/* Set RGMII delay based on the selected values */
1275	ret = qca8k_rmw(priv, reg,
1276			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1277			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1278			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1279			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1280			val);
1281	if (ret)
1282		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1283			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1284}
1285
1286static struct phylink_pcs *
1287qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1288			     phy_interface_t interface)
1289{
1290	struct qca8k_priv *priv = ds->priv;
1291	struct phylink_pcs *pcs = NULL;
1292
1293	switch (interface) {
1294	case PHY_INTERFACE_MODE_SGMII:
1295	case PHY_INTERFACE_MODE_1000BASEX:
1296		switch (port) {
1297		case 0:
1298			pcs = &priv->pcs_port_0.pcs;
1299			break;
1300
1301		case 6:
1302			pcs = &priv->pcs_port_6.pcs;
1303			break;
1304		}
1305		break;
1306
1307	default:
1308		break;
1309	}
1310
1311	return pcs;
1312}
1313
/* phylink mac_config hook: program the pad-control register of the CPU
 * ports (0 and 6) for the negotiated interface. Internal-PHY ports (1-5)
 * need no pad configuration and return early.
 */
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6: /* 2nd CPU port / external PHY */
		/* Unlike port 0, port 6 additionally supports 1000base-X */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	/* In-band negotiation is only supported on port 6 */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* QCA8337 requires to set rgmii rx delay for all ports.
		 * This is enabled through PORT5_PAD_CTRL for all ports,
		 * rather than individual port registers.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
1393
1394static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1395				   struct phylink_config *config)
1396{
1397	switch (port) {
1398	case 0: /* 1st CPU port */
1399		phy_interface_set_rgmii(config->supported_interfaces);
1400		__set_bit(PHY_INTERFACE_MODE_SGMII,
1401			  config->supported_interfaces);
1402		break;
1403
1404	case 1:
1405	case 2:
1406	case 3:
1407	case 4:
1408	case 5:
1409		/* Internal PHY */
1410		__set_bit(PHY_INTERFACE_MODE_GMII,
1411			  config->supported_interfaces);
1412		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1413			  config->supported_interfaces);
1414		break;
1415
1416	case 6: /* 2nd CPU port / external PHY */
1417		phy_interface_set_rgmii(config->supported_interfaces);
1418		__set_bit(PHY_INTERFACE_MODE_SGMII,
1419			  config->supported_interfaces);
1420		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
1421			  config->supported_interfaces);
1422		break;
1423	}
1424
1425	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1426		MAC_10 | MAC_100 | MAC_1000FD;
 
 
1427}
1428
1429static void
1430qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1431			    phy_interface_t interface)
1432{
1433	struct qca8k_priv *priv = ds->priv;
1434
1435	qca8k_port_set_status(priv, port, 0);
1436}
1437
1438static void
1439qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1440			  phy_interface_t interface, struct phy_device *phydev,
1441			  int speed, int duplex, bool tx_pause, bool rx_pause)
1442{
1443	struct qca8k_priv *priv = ds->priv;
1444	u32 reg;
1445
1446	if (phylink_autoneg_inband(mode)) {
1447		reg = QCA8K_PORT_STATUS_LINK_AUTO;
1448	} else {
1449		switch (speed) {
1450		case SPEED_10:
1451			reg = QCA8K_PORT_STATUS_SPEED_10;
1452			break;
1453		case SPEED_100:
1454			reg = QCA8K_PORT_STATUS_SPEED_100;
1455			break;
1456		case SPEED_1000:
1457			reg = QCA8K_PORT_STATUS_SPEED_1000;
1458			break;
1459		default:
1460			reg = QCA8K_PORT_STATUS_LINK_AUTO;
1461			break;
1462		}
1463
1464		if (duplex == DUPLEX_FULL)
1465			reg |= QCA8K_PORT_STATUS_DUPLEX;
1466
1467		if (rx_pause || dsa_is_cpu_port(ds, port))
1468			reg |= QCA8K_PORT_STATUS_RXFLOW;
1469
1470		if (tx_pause || dsa_is_cpu_port(ds, port))
1471			reg |= QCA8K_PORT_STATUS_TXFLOW;
1472	}
1473
1474	reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1475
1476	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1477}
1478
/* Map a generic phylink_pcs back to its embedding qca8k_pcs. */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
1483
1484static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1485				struct phylink_link_state *state)
1486{
1487	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1488	int port = pcs_to_qca8k_pcs(pcs)->port;
1489	u32 reg;
1490	int ret;
1491
1492	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1493	if (ret < 0) {
1494		state->link = false;
1495		return;
1496	}
1497
1498	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1499	state->an_complete = state->link;
 
1500	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1501							   DUPLEX_HALF;
1502
1503	switch (reg & QCA8K_PORT_STATUS_SPEED) {
1504	case QCA8K_PORT_STATUS_SPEED_10:
1505		state->speed = SPEED_10;
1506		break;
1507	case QCA8K_PORT_STATUS_SPEED_100:
1508		state->speed = SPEED_100;
1509		break;
1510	case QCA8K_PORT_STATUS_SPEED_1000:
1511		state->speed = SPEED_1000;
1512		break;
1513	default:
1514		state->speed = SPEED_UNKNOWN;
1515		break;
1516	}
1517
1518	if (reg & QCA8K_PORT_STATUS_RXFLOW)
1519		state->pause |= MLO_PAUSE_RX;
1520	if (reg & QCA8K_PORT_STATUS_TXFLOW)
1521		state->pause |= MLO_PAUSE_TX;
1522}
1523
1524static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
1525			    phy_interface_t interface,
1526			    const unsigned long *advertising,
1527			    bool permit_pause_to_mac)
1528{
1529	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1530	int cpu_port_index, ret, port;
1531	u32 reg, val;
1532
1533	port = pcs_to_qca8k_pcs(pcs)->port;
1534	switch (port) {
1535	case 0:
1536		reg = QCA8K_REG_PORT0_PAD_CTRL;
1537		cpu_port_index = QCA8K_CPU_PORT0;
1538		break;
1539
1540	case 6:
1541		reg = QCA8K_REG_PORT6_PAD_CTRL;
1542		cpu_port_index = QCA8K_CPU_PORT6;
1543		break;
1544
1545	default:
1546		WARN_ON(1);
1547		return -EINVAL;
1548	}
1549
1550	/* Enable/disable SerDes auto-negotiation as necessary */
1551	val = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ?
1552		0 : QCA8K_PWS_SERDES_AEN_DIS;
1553
1554	ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8K_PWS_SERDES_AEN_DIS, val);
1555	if (ret)
1556		return ret;
 
 
 
 
 
1557
1558	/* Configure the SGMII parameters */
1559	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1560	if (ret)
1561		return ret;
1562
1563	val |= QCA8K_SGMII_EN_SD;
1564
1565	if (priv->ports_config.sgmii_enable_pll)
1566		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1567		       QCA8K_SGMII_EN_TX;
1568
1569	if (dsa_is_cpu_port(priv->ds, port)) {
1570		/* CPU port, we're talking to the CPU MAC, be a PHY */
1571		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1572		val |= QCA8K_SGMII_MODE_CTRL_PHY;
1573	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1574		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1575		val |= QCA8K_SGMII_MODE_CTRL_MAC;
1576	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1577		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1578		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1579	}
1580
1581	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1582
1583	/* From original code is reported port instability as SGMII also
1584	 * require delay set. Apply advised values here or take them from DT.
1585	 */
1586	if (interface == PHY_INTERFACE_MODE_SGMII)
1587		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1588	/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
1589	 * falling edge is set writing in the PORT0 PAD reg
1590	 */
1591	if (priv->switch_id == QCA8K_ID_QCA8327 ||
1592	    priv->switch_id == QCA8K_ID_QCA8337)
1593		reg = QCA8K_REG_PORT0_PAD_CTRL;
1594
1595	val = 0;
1596
1597	/* SGMII Clock phase configuration */
1598	if (priv->ports_config.sgmii_rx_clk_falling_edge)
1599		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1600
1601	if (priv->ports_config.sgmii_tx_clk_falling_edge)
1602		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1603
1604	if (val)
1605		ret = qca8k_rmw(priv, reg,
1606				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1607				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1608				val);
1609
1610	return 0;
1611}
1612
/* Intentionally empty: phylink requires the pcs_an_restart hook, but no
 * AN-restart action is performed for this hardware here.
 */
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
1616
/* phylink PCS operations shared by both SerDes-capable ports (0 and 6). */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
1622
1623static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
1624			    int port)
1625{
1626	qpcs->pcs.ops = &qca8k_pcs_ops;
1627	qpcs->pcs.neg_mode = true;
1628
1629	/* We don't have interrupts for link changes, so we need to poll */
1630	qpcs->pcs.poll = true;
1631	qpcs->priv = priv;
1632	qpcs->port = port;
1633}
1634
/* Handler for MIB autocast packets delivered by the qca tagger.
 *
 * The switch broadcasts one MIB packet per port; only the packet matching
 * mib_eth_data->req_port is parsed into mib_eth_data->data. Every packet
 * (parsed or not) decrements port_parsed, and the waiter in
 * qca8k_get_ethtool_stats_eth() is completed once all ports reported.
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	__le32 *data2;
	u8 port;
	int i;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocast every port. Ignore other packet and
	 * parse only the requested one.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	data2 = (__le32 *)skb->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* First 3 mib are present in the skb head */
		if (i < 3) {
			mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
			continue;
		}

		/* Some mib are 64 bit wide */
		if (mib->size == 2)
			mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
		else
			mib_eth_data->data[i] = get_unaligned_le32(data2);

		/* Counters are packed back-to-back; advance by the counter
		 * width in 32-bit words.
		 */
		data2 += mib->size;
	}

exit:
	/* Complete on receiving all the mib packet */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
1680
/* Fetch the MIB counters of @port into @data via the Ethernet MIB
 * autocast mechanism (parsed by qca8k_mib_autocast_handler) instead of
 * reading each counter over MDIO.
 *
 * NOTE(review): on success this returns the positive remaining-time value
 * from wait_for_completion_timeout(), 0 on timeout, and a negative errno
 * only when triggering the autocast fails — confirm callers interpret the
 * return value accordingly.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	/* One autocast packet arrives per port; expect all of them */
	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send mib autocast request */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
1719
1720static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
1721{
1722	struct qca8k_priv *priv = ds->priv;
1723
1724	/* Communicate to the phy internal driver the switch revision.
1725	 * Based on the switch revision different values needs to be
1726	 * set to the dbg and mmd reg on the phy.
1727	 * The first 2 bit are used to communicate the switch revision
1728	 * to the phy driver.
1729	 */
1730	if (port > 0 && port < 6)
1731		return priv->switch_revision;
1732
1733	return 0;
1734}
1735
/* All ports of this switch family use the qca tagging protocol. */
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}
1742
/* Track whether the conduit (CPU master) netdev is operational, so that
 * mgmt/MIB-over-Ethernet users know when they can use it. The pointer is
 * updated under both Ethernet-path mutexes (mgmt first, then mib — keep
 * this lock order consistent with the rest of the driver).
 */
static void
qca8k_conduit_change(struct dsa_switch *ds, const struct net_device *conduit,
		     bool operational)
{
	struct dsa_port *dp = conduit->dsa_ptr;
	struct qca8k_priv *priv = ds->priv;

	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
	if (dp->index != 0)
		return;

	mutex_lock(&priv->mgmt_eth_data.mutex);
	mutex_lock(&priv->mib_eth_data.mutex);

	priv->mgmt_conduit = operational ? (struct net_device *)conduit : NULL;

	mutex_unlock(&priv->mib_eth_data.mutex);
	mutex_unlock(&priv->mgmt_eth_data.mutex);
}
1762
1763static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
1764				      enum dsa_tag_protocol proto)
1765{
1766	struct qca_tagger_data *tagger_data;
1767
1768	switch (proto) {
1769	case DSA_TAG_PROTO_QCA:
1770		tagger_data = ds->tagger_data;
1771
1772		tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
1773		tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
1774
1775		break;
1776	default:
1777		return -EOPNOTSUPP;
1778	}
1779
1780	return 0;
1781}
1782
/* Apply head-of-line blocking buffer/priority settings for one port.
 *
 * The raw reservation values come from the legacy vendor driver (see the
 * comment at the call site in qca8k_setup()); their exact hardware meaning
 * is not documented here.
 */
static void qca8k_setup_hol_fixup(struct qca8k_priv *priv, int port)
{
	u32 mask;

	switch (port) {
	/* The 2 CPU port and port 5 requires some different
	 * priority than any other ports.
	 */
	case 0:
	case 5:
	case 6:
		mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
			QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
		break;
	default:
		mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
			QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
			QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
	}
	regmap_write(priv->regmap, QCA8K_REG_PORT_HOL_CTRL0(port), mask);

	/* Ingress threshold plus per-priority/per-port egress buffer
	 * enables and WRED, identical for every port.
	 */
	mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
	       QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
	       QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
	       QCA8K_PORT_HOL_CTRL1_WRED_EN;
	regmap_update_bits(priv->regmap, QCA8K_REG_PORT_HOL_CTRL1(port),
			   QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
			   QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			   QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			   QCA8K_PORT_HOL_CTRL1_WRED_EN,
			   mask);
}
1822
/* One-time switch initialization for the DSA framework.
 *
 * Locates the CPU port, applies DT-derived pad/PWS/MDIO/LED setup,
 * instantiates the two PCS, then programs forwarding, header mode, VLAN
 * defaults and buffering for every port. Returns 0 or a negative errno.
 */
static int
qca8k_setup(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = ds->priv;
	struct dsa_port *dp;
	int cpu_port, ret;
	u32 mask;

	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse CPU port config to be later used in phy_link mac_config */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_led_ctrl(priv);
	if (ret)
		return ret;

	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU Port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* Enable MIB counters; a failure here is logged but not fatal */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	dsa_switch_for_each_port(dp, ds) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(dp->index),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;
	}

	/* Disable MAC by default on all user ports */
	dsa_switch_for_each_user_port(dp, ds)
		qca8k_port_set_status(priv, dp->index, 0);

	/* Enable QCA header mode on all cpu ports */
	dsa_switch_for_each_cpu_port(dp, ds) {
		ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(dp->index),
				  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
				  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
		if (ret) {
			dev_err(priv->dev, "failed enabling QCA header mode on port %d", dp->index);
			return ret;
		}
	}

	/* Forward all unknown frames to CPU port for Linux processing
	 * Notice that in multi-cpu config only one port should be set
	 * for igmp, unknown, multicast and broadcast packet
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* CPU port gets connected to all user ports of the switch */
	ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port),
			QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
	if (ret)
		return ret;

	/* Setup connection between CPU port & user ports
	 * Individual user ports get connected to CPU port only
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		u8 port = dp->index;

		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_MEMBER,
				BIT(cpu_port));
		if (ret)
			return ret;

		/* Address learning is off until the port joins a bridge */
		ret = regmap_clear_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port),
					QCA8K_PORT_LOOKUP_LEARN);
		if (ret)
			return ret;

		/* For port based vlans to work we need to set the
		 * default egress vid
		 */
		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
				QCA8K_EGREES_VLAN_PORT_MASK(port),
				QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF));
		if (ret)
			return ret;

		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
				  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
				  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
		if (ret)
			return ret;
	}

	/* The port 5 of the qca8337 have some problem in flood condition. The
	 * original legacy driver had some specific buffer and priority settings
	 * for the different port suggested by the QCA switch team. Add this
	 * missing settings to improve switch stability under load condition.
	 * This problem is limited to qca8337 and other qca8k switch are not affected.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8337)
		dsa_switch_for_each_available_port(dp, ds)
			qca8k_setup_hol_fixup(priv, dp->index);

	/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Setup our port MTUs to match power on defaults */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set min a max ageing value supported */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
1994
/* DSA switch operations; entries not defined in this file live in the
 * shared qca8k common code (see qca8k.h).
 */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol	= qca8k_get_tag_protocol,
	.setup			= qca8k_setup,
	.get_strings		= qca8k_get_strings,
	.get_ethtool_stats	= qca8k_get_ethtool_stats,
	.get_sset_count		= qca8k_get_sset_count,
	.set_ageing_time	= qca8k_set_ageing_time,
	.get_mac_eee		= qca8k_get_mac_eee,
	.set_mac_eee		= qca8k_set_mac_eee,
	.port_enable		= qca8k_port_enable,
	.port_disable		= qca8k_port_disable,
	.port_change_mtu	= qca8k_port_change_mtu,
	.port_max_mtu		= qca8k_port_max_mtu,
	.port_stp_state_set	= qca8k_port_stp_state_set,
	.port_pre_bridge_flags	= qca8k_port_pre_bridge_flags,
	.port_bridge_flags	= qca8k_port_bridge_flags,
	.port_bridge_join	= qca8k_port_bridge_join,
	.port_bridge_leave	= qca8k_port_bridge_leave,
	.port_fast_age		= qca8k_port_fast_age,
	.port_fdb_add		= qca8k_port_fdb_add,
	.port_fdb_del		= qca8k_port_fdb_del,
	.port_fdb_dump		= qca8k_port_fdb_dump,
	.port_mdb_add		= qca8k_port_mdb_add,
	.port_mdb_del		= qca8k_port_mdb_del,
	.port_mirror_add	= qca8k_port_mirror_add,
	.port_mirror_del	= qca8k_port_mirror_del,
	.port_vlan_filtering	= qca8k_port_vlan_filtering,
	.port_vlan_add		= qca8k_port_vlan_add,
	.port_vlan_del		= qca8k_port_vlan_del,
	.phylink_get_caps	= qca8k_phylink_get_caps,
	.phylink_mac_select_pcs	= qca8k_phylink_mac_select_pcs,
	.phylink_mac_config	= qca8k_phylink_mac_config,
	.phylink_mac_link_down	= qca8k_phylink_mac_link_down,
	.phylink_mac_link_up	= qca8k_phylink_mac_link_up,
	.get_phy_flags		= qca8k_get_phy_flags,
	.port_lag_join		= qca8k_port_lag_join,
	.port_lag_leave		= qca8k_port_lag_leave,
	.conduit_state_change	= qca8k_conduit_change,
	.connect_tag_protocol	= qca8k_connect_tag_protocol,
};
2035
/* MDIO probe: allocate driver state, release the optional reset GPIO,
 * create the regmap, identify the switch and register with DSA.
 * Returns 0 or a negative errno.
 */
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* allocate the private data struct so that we can probe the switches
	 * ID register
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	priv->info = of_device_get_match_data(priv->dev);

	/* Requested asserted (GPIOD_OUT_HIGH): the switch starts in reset */
	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		/* The active low duration must be greater than 10 ms
		 * and checkpatch.pl wants 20 ms.
		 */
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* 0xffff is never a valid page, forcing the first page switch */
	priv->mdio_cache.page = 0xffff;

	/* Check the detected switch id */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = &qca8k_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
2100
2101static void
2102qca8k_sw_remove(struct mdio_device *mdiodev)
2103{
2104	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
2105	int i;
2106
2107	if (!priv)
2108		return;
2109
2110	for (i = 0; i < QCA8K_NUM_PORTS; i++)
2111		qca8k_port_set_status(priv, i, 0);
2112
2113	dsa_unregister_switch(priv->ds);
2114}
2115
2116static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
2117{
2118	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
2119
2120	if (!priv)
2121		return;
2122
2123	dsa_switch_shutdown(priv->ds);
2124
2125	dev_set_drvdata(&mdiodev->dev, NULL);
2126}
2127
2128#ifdef CONFIG_PM_SLEEP
2129static void
2130qca8k_set_pm(struct qca8k_priv *priv, int enable)
2131{
2132	int port;
2133
2134	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
2135		/* Do not enable on resume if the port was
2136		 * disabled before.
2137		 */
2138		if (!(priv->port_enabled_map & BIT(port)))
2139			continue;
2140
2141		qca8k_port_set_status(priv, port, enable);
2142	}
2143}
2144
/* System-suspend callback: stop the previously-enabled port MACs and let
 * the DSA core suspend the switch tree.
 */
static int qca8k_suspend(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);
}
2153
/* System-resume callback: re-enable the port MACs that were enabled
 * before suspend and let the DSA core resume the switch tree.
 */
static int qca8k_resume(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
}
2162#endif /* CONFIG_PM_SLEEP */
2163
/* PM ops; the callbacks only exist under CONFIG_PM_SLEEP (see #ifdef above) */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);

/* Chip-specific hooks; all supported chips use Ethernet MIB autocast */
static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
};
2170
/* QCA8327: reduced (148-pin) package variant, same switch ID as QCA8328 */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

/* QCA8328: full-package sibling of QCA8327, shares its switch ID */
static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

/* QCA8334 and QCA8337 share the QCA8337 switch ID and MIB layout */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

/* Device-tree match table; .data selects the per-chip match data above */
static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
2197
2198static struct mdio_driver qca8kmdio_driver = {
2199	.probe  = qca8k_sw_probe,
2200	.remove = qca8k_sw_remove,
2201	.shutdown = qca8k_sw_shutdown,
2202	.mdiodrv.driver = {
2203		.name = "qca8k",
2204		.of_match_table = qca8k_of_match,
2205		.pm = &qca8k_pm_ops,
2206	},
2207};
2208
2209mdio_module_driver(qca8kmdio_driver);
2210
2211MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
2212MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
2213MODULE_LICENSE("GPL v2");
2214MODULE_ALIAS("platform:qca8k");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
   4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
   5 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
   6 * Copyright (c) 2016 John Crispin <john@phrozen.org>
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/phy.h>
  11#include <linux/netdevice.h>
  12#include <linux/bitfield.h>
  13#include <linux/regmap.h>
  14#include <net/dsa.h>
  15#include <linux/of_net.h>
  16#include <linux/of_mdio.h>
  17#include <linux/of_platform.h>
  18#include <linux/mdio.h>
  19#include <linux/phylink.h>
  20#include <linux/gpio/consumer.h>
  21#include <linux/etherdevice.h>
  22#include <linux/dsa/tag_qca.h>
  23
  24#include "qca8k.h"
 
  25
/* Split a 32-bit switch register address into the triplet used by the
 * paged 16-bit MDIO access scheme: the registers are 32 bits wide and
 * word-addressed on the wire, so bit 0/1 are dropped, the next bits
 * select the MII register (r1, kept word aligned), the following three
 * bits are OR-ed into the phy address (r2) and the remaining bits form
 * the page programmed via qca8k_set_page().
 */
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
	regaddr >>= 1;
	*r1 = regaddr & 0x1e;	/* MII register, even (word pair base) */

	regaddr >>= 5;
	*r2 = regaddr & 0x7;	/* low bits of the phy address */

	regaddr >>= 3;
	*page = regaddr & 0x3ff;	/* page selector */
}
  38
  39static int
  40qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
  41{
  42	int ret;
  43	u16 lo;
  44
  45	lo = val & 0xffff;
  46	ret = bus->write(bus, phy_id, regnum, lo);
  47	if (ret < 0)
  48		dev_err_ratelimited(&bus->dev,
  49				    "failed to write qca8k 32bit lo register\n");
  50
  51	return ret;
  52}
  53
  54static int
  55qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
  56{
  57	int ret;
  58	u16 hi;
  59
  60	hi = (u16)(val >> 16);
  61	ret = bus->write(bus, phy_id, regnum, hi);
  62	if (ret < 0)
  63		dev_err_ratelimited(&bus->dev,
  64				    "failed to write qca8k 32bit hi register\n");
  65
  66	return ret;
  67}
  68
  69static int
  70qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
  71{
  72	int ret;
  73
  74	ret = bus->read(bus, phy_id, regnum);
  75	if (ret < 0)
  76		goto err;
  77
  78	*val = ret & 0xffff;
  79	return 0;
  80
  81err:
  82	dev_err_ratelimited(&bus->dev,
  83			    "failed to read qca8k 32bit lo register\n");
  84	*val = 0;
  85
  86	return ret;
  87}
  88
  89static int
  90qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
  91{
  92	int ret;
  93
  94	ret = bus->read(bus, phy_id, regnum);
  95	if (ret < 0)
  96		goto err;
  97
  98	*val = ret << 16;
  99	return 0;
 100
 101err:
 102	dev_err_ratelimited(&bus->dev,
 103			    "failed to read qca8k 32bit hi register\n");
 104	*val = 0;
 105
 106	return ret;
 107}
 108
 109static int
 110qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
 111{
 112	u32 hi, lo;
 113	int ret;
 114
 115	*val = 0;
 116
 117	ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
 118	if (ret < 0)
 119		goto err;
 120
 121	ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
 122	if (ret < 0)
 123		goto err;
 124
 125	*val = lo | hi;
 126
 127err:
 128	return ret;
 129}
 130
 131static void
 132qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
 133{
 134	if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
 135		return;
 136
 137	qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
 138}
 139
/* Select the register page used by subsequent paged MII accesses.
 * The last page is cached to skip redundant bus writes; the callers in
 * this file hold the MDIO bus lock around set_page + access, so the
 * cache cannot race.
 */
static int
qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
	u16 *cached_page = &priv->mdio_cache.page;
	struct mii_bus *bus = priv->bus;
	int ret;

	/* Fast path: the switch is already on the requested page */
	if (page == *cached_page)
		return 0;

	ret = bus->write(bus, 0x18, 0, page);
	if (ret < 0) {
		dev_err_ratelimited(&bus->dev,
				    "failed to set qca8k page\n");
		return ret;
	}

	*cached_page = page;
	/* Give the switch time to latch the new page */
	usleep_range(1000, 2000);
	return 0;
}
 161
/* Handler invoked by the qca tagger when a mgmt Ethernet ack packet is
 * received from the switch: decode the header, flag the ack if the
 * sequence number matches the pending request, copy read data into
 * mgmt_eth_data->data and wake the waiter blocked on rw_done.
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u32 command;
	u8 len, cmd;
	int i;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	command = get_unaligned_le32(&mgmt_ethhdr->command);
	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);

	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
	/* Special case for len of 15 as this is the max value for len and needs to
	 * be increased before converting it from word to dword.
	 */
	if (len == 15)
		len++;

	/* We can ignore odd value, we always round up them in the alloc function. */
	len *= sizeof(u16);

	/* Make sure the seq match the requested packet */
	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		u32 *val = mgmt_eth_data->data;

		*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);

		/* Get the rest of the 12 byte of data.
		 * The read/write function will extract the requested data.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN) {
			__le32 *data2 = (__le32 *)skb->data;
			int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
					     len - QCA_HDR_MGMT_DATA1_LEN);

			val++;

			for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
				*val = get_unaligned_le32(data2);
				val++;
				data2++;
			}
		}
	}

	complete(&mgmt_eth_data->rw_done);
}
 216
/* Build a mgmt Ethernet request packet for a register access of @len
 * bytes at @reg. For MDIO_WRITE the payload comes from @val; the caller
 * still has to stamp the sequence number before transmission.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	__le32 *data2;
	u32 command;
	u16 hdr;
	int i;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* Hdr mgmt length value is in step of word size.
	 * As an example to process 4 byte of data the correct length to set is 2.
	 * To process 8 byte 4, 12 byte 6, 16 byte 8...
	 *
	 * Odd values will always return the next size on the ack packet.
	 * (length of 3 (6 byte) will always return 8 bytes of data)
	 *
	 * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
	 * of data.
	 *
	 * To correctly calculate the length we divide the requested len by word and
	 * round up.
	 * On the ack function we can skip the odd check as we already handle the
	 * case here.
	 */
	real_len = DIV_ROUND_UP(len, sizeof(u16));

	/* We check if the result len is odd and we round up another time to
	 * the next size. (length of 3 will be increased to 4 as switch will always
	 * return 8 bytes)
	 */
	if (real_len % sizeof(u16) != 0)
		real_len++;

	/* Max reg value is 0xf(15) but switch will always return the next size (32 byte) */
	if (real_len == 16)
		real_len--;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	/* Tag the frame as a from-CPU register read/write request */
	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
					   QCA_HDR_MGMT_CHECK_CODE_VAL);

	put_unaligned_le32(command, &mgmt_ethhdr->command);

	if (cmd == MDIO_WRITE)
		put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);

	mgmt_ethhdr->hdr = htons(hdr);

	/* Payload beyond the first dword goes in the DATA2 area */
	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
		int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
				     len - QCA_HDR_MGMT_DATA1_LEN);

		val++;

		for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
			put_unaligned_le32(*val, data2);
			data2++;
			val++;
		}
	}

	return skb;
}
 300
 301static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
 302{
 303	struct qca_mgmt_ethhdr *mgmt_ethhdr;
 304	u32 seq;
 305
 306	seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
 307	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
 308	put_unaligned_le32(seq, &mgmt_ethhdr->seq);
 309}
 310
/* Read @len bytes starting at switch register @reg over the Ethernet
 * mgmt protocol. Returns 0 on success, -ENOMEM on allocation failure,
 * -EINVAL if no mgmt master is available or the ack didn't match, and
 * -ETIMEDOUT if no ack arrived in time.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* Data is copied out unconditionally; the error checks below
	 * still win if the wait failed.
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
 362
/* Write @len bytes from @val to switch register @reg over the Ethernet
 * mgmt protocol. Returns 0 on success, -ENOMEM on allocation failure,
 * -EINVAL if no mgmt master is available or the ack didn't match, and
 * -ETIMEDOUT if no ack arrived in time.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
 410
 411static int
 412qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
 413{
 414	u32 val = 0;
 415	int ret;
 416
 417	ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
 418	if (ret)
 419		return ret;
 420
 421	val &= ~mask;
 422	val |= write_val;
 423
 424	return qca8k_write_eth(priv, reg, &val, sizeof(val));
 425}
 426
/* regmap read backend: try the Ethernet mgmt protocol first and fall
 * back to the paged MII access path on any failure.
 */
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	/* Nested lock: this bus access may run under a PHY access */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}
 452
/* regmap write backend: try the Ethernet mgmt protocol first and fall
 * back to the paged MII access path on any failure.
 */
static int
qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	/* Nested lock: this bus access may run under a PHY access */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	/* qca8k_mii_write32() logs its own errors and returns void */
	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}
 478
/* regmap update_bits backend: try the Ethernet mgmt protocol first and
 * fall back to a read-modify-write over the paged MII access path.
 */
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	/* Nested lock: this bus access may run under a PHY access */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	if (ret < 0)
		goto exit;

	val &= ~mask;
	val |= write_val;
	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);

	return ret;
}
 512
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* regmap over the custom MDIO/Ethernet accessors above. Internal
 * locking and caching are disabled because the read/write callbacks
 * synchronize on the MDIO bus lock themselves.
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* Locking is handled by qca8k read/write */
	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
};
 525
 526static int
 527qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
 528			struct sk_buff *read_skb, u32 *val)
 529{
 530	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
 531	bool ack;
 532	int ret;
 533
 
 
 
 534	reinit_completion(&mgmt_eth_data->rw_done);
 535
 536	/* Increment seq_num and set it in the copy pkt */
 537	mgmt_eth_data->seq++;
 538	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 539	mgmt_eth_data->ack = false;
 540
 541	dev_queue_xmit(skb);
 542
 543	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
 544					  QCA8K_ETHERNET_TIMEOUT);
 545
 546	ack = mgmt_eth_data->ack;
 547
 548	if (ret <= 0)
 549		return -ETIMEDOUT;
 550
 551	if (!ack)
 552		return -EINVAL;
 553
 554	*val = mgmt_eth_data->data[0];
 555
 556	return 0;
 557}
 558
/* Drive one PHY read/write through the switch internal MDIO master
 * using the Ethernet mgmt protocol: program MASTER_CTRL, poll its BUSY
 * bit, fetch the data word for reads, and finally clear MASTER_CTRL
 * (even on error). All three packets are preallocated so the whole
 * sequence runs under mgmt_eth_data->mutex without sleeping allocators.
 * Returns the read data, 0 on successful write, or a negative errno.
 */
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_master;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy Wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if mgmt_master is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		ret = -EINVAL;
		goto err_mgmt_master;
	}

	read_skb->dev = mgmt_master;
	clear_skb->dev = mgmt_master;
	write_skb->dev = mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(write_skb);

	/* NOTE(review): read/write_eth wrap QCA8K_ETHERNET_TIMEOUT in
	 * msecs_to_jiffies() but here it is passed raw — confirm the unit.
	 */
	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	/* Poll the BUSY bit; qca8k_phy_eth_busy_wait() consumes a fresh
	 * copy of read_skb on every iteration.
	 */
	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		kfree_skb(read_skb);
	}
exit:
	/* Always reset the mdio master, even after an error above */
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);

	return ret;

	/* Error handling before lock */
err_mgmt_master:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}
 718
 719static u32
 720qca8k_port_to_phy(int port)
 721{
 722	/* From Andrew Lunn:
 723	 * Port 0 has no internal phy.
 724	 * Port 1 has an internal PHY at MDIO address 0.
 725	 * Port 2 has an internal PHY at MDIO address 1.
 726	 * ...
 727	 * Port 5 has an internal PHY at MDIO address 4.
 728	 * Port 6 has no internal PHY.
 729	 */
 730
 731	return port - 1;
 732}
 733
/* Poll the high word of @reg until the bits in @mask clear, using the
 * paged MII access path. Caller must hold the MDIO bus lock and have
 * the page already selected.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1 + 1, &val);

	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
 755
/* Write @data to register @regnum of PHY @phy through the switch
 * internal MDIO master, over the paged MII access path.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	/* Kick the MDIO master, then wait for BUSY to clear */
	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
 793
/* Read register @regnum of PHY @phy through the switch internal MDIO
 * master, over the paged MII access path. Returns the 16-bit data on
 * success or a negative errno.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	/* The control bits live in the high word; writing it kicks the
	 * MDIO master.
	 */
	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	/* Read data lands in the low word of MASTER_CTRL */
	ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
 837
 838static int
 839qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
 840{
 841	struct qca8k_priv *priv = slave_bus->priv;
 842	int ret;
 843
 844	/* Use mdio Ethernet when available, fallback to legacy one on error */
 845	ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
 846	if (!ret)
 847		return 0;
 848
 849	return qca8k_mdio_write(priv, phy, regnum, data);
 850}
 851
 852static int
 853qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
 854{
 855	struct qca8k_priv *priv = slave_bus->priv;
 856	int ret;
 857
 858	/* Use mdio Ethernet when available, fallback to legacy one on error */
 859	ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
 860	if (ret >= 0)
 861		return ret;
 862
 863	ret = qca8k_mdio_read(priv, phy, regnum);
 864
 865	if (ret < 0)
 866		return 0xffff;
 867
 868	return ret;
 869}
 870
 871static int
 872qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
 873{
 874	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
 875
 876	return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
 877}
 878
 879static int
 880qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
 881{
 882	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
 883
 884	return qca8k_internal_mdio_read(slave_bus, port, regnum);
 885}
 886
 887static int
 888qca8k_mdio_register(struct qca8k_priv *priv)
 889{
 890	struct dsa_switch *ds = priv->ds;
 
 891	struct device_node *mdio;
 892	struct mii_bus *bus;
 
 
 
 
 
 893
 894	bus = devm_mdiobus_alloc(ds->dev);
 895	if (!bus)
 896		return -ENOMEM;
 
 
 897
 
 898	bus->priv = (void *)priv;
 899	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
 900		 ds->dst->index, ds->index);
 901	bus->parent = ds->dev;
 902	bus->phy_mask = ~ds->phys_mii_mask;
 903	ds->slave_mii_bus = bus;
 904
 905	/* Check if the devicetree declare the port:phy mapping */
 906	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
 907	if (of_device_is_available(mdio)) {
 908		bus->name = "qca8k slave mii";
 909		bus->read = qca8k_internal_mdio_read;
 910		bus->write = qca8k_internal_mdio_write;
 911		return devm_of_mdiobus_register(priv->dev, bus, mdio);
 
 
 
 
 
 
 
 
 912	}
 913
 914	/* If a mapping can't be found the legacy mapping is used,
 915	 * using the qca8k_port_to_phy function
 916	 */
 917	bus->name = "qca8k-legacy slave mii";
 918	bus->read = qca8k_legacy_mdio_read;
 919	bus->write = qca8k_legacy_mdio_write;
 920	return devm_mdiobus_register(priv->dev, bus);
 921}
 922
/* Decide between the internal MDIO master and the external MDC
 * passthrough based on each user port's phy-handle/phy-mode, then
 * either disable the internal master or register the slave MDIO bus.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop the iterator's and the parent's refs on error */
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		/* A phy-handle with a non-internal mode means the PHY
		 * sits on an external MDIO bus.
		 */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */

		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}
 991
 992static int
 993qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
 994{
 995	u32 mask = 0;
 996	int ret = 0;
 997
 998	/* SoC specific settings for ipq8064.
 999	 * If more device require this consider adding
1000	 * a dedicated binding.
1001	 */
1002	if (of_machine_is_compatible("qcom,ipq8064"))
1003		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1004
1005	/* SoC specific settings for ipq8065 */
1006	if (of_machine_is_compatible("qcom,ipq8065"))
1007		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1008
1009	if (mask) {
1010		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1011				QCA8K_MAC_PWR_RGMII0_1_8V |
1012				QCA8K_MAC_PWR_RGMII1_1_8V,
1013				mask);
1014	}
1015
1016	return ret;
1017}
1018
1019static int qca8k_find_cpu_port(struct dsa_switch *ds)
1020{
1021	struct qca8k_priv *priv = ds->priv;
1022
1023	/* Find the connected cpu port. Valid port are 0 or 6 */
1024	if (dsa_is_cpu_port(ds, 0))
1025		return 0;
1026
1027	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1028
1029	if (dsa_is_cpu_port(ds, 6))
1030		return 6;
1031
1032	return -EINVAL;
1033}
1034
/* Program the PWS (power-on strap) register from devicetree properties:
 * package selection for QCA8327 and the optional power-on-sel /
 * led-open-drain overrides.
 */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 requires the package mode to be set explicitly; its
	 * bigger brother QCA8328 has the 172 pin layout. This should be
	 * applied by default but we set it just to make sure.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		/* Set the correct package of 148 pin for QCA8327 */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	if (of_property_read_bool(node, "qca,led-open-drain")) {
		/* Open-drain LEDs only take effect when the power-on
		 * strapping is overridden.
		 */
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			val);
}
1074
/* Parse the devicetree configuration of the two possible CPU ports
 * (0 and 6): RGMII tx/rx internal delays (stored in ns, capped to the
 * register maximum of 3 ns) and the SGMII clock-edge/PLL tweaks.
 * Results are cached in priv->ports_config for the mac_config hooks.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* We have 2 CPU port. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* Index advances per CPU-capable port, even if disabled,
		 * so port 6 always maps to index 1.
		 */
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
1172
1173static void
1174qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1175				      u32 reg)
1176{
1177	u32 delay, val = 0;
1178	int ret;
1179
1180	/* Delay can be declared in 3 different way.
1181	 * Mode to rgmii and internal-delay standard binding defined
1182	 * rgmii-id or rgmii-tx/rx phy mode set.
1183	 * The parse logic set a delay different than 0 only when one
1184	 * of the 3 different way is used. In all other case delay is
1185	 * not enabled. With ID or TX/RXID delay is enabled and set
1186	 * to the default and recommended value.
1187	 */
1188	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1189		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1190
1191		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1192			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1193	}
1194
1195	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1196		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1197
1198		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1199			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1200	}
1201
1202	/* Set RGMII delay based on the selected values */
1203	ret = qca8k_rmw(priv, reg,
1204			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1205			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1206			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1207			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1208			val);
1209	if (ret)
1210		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1211			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1212}
1213
1214static struct phylink_pcs *
1215qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1216			     phy_interface_t interface)
1217{
1218	struct qca8k_priv *priv = ds->priv;
1219	struct phylink_pcs *pcs = NULL;
1220
1221	switch (interface) {
1222	case PHY_INTERFACE_MODE_SGMII:
1223	case PHY_INTERFACE_MODE_1000BASEX:
1224		switch (port) {
1225		case 0:
1226			pcs = &priv->pcs_port_0.pcs;
1227			break;
1228
1229		case 6:
1230			pcs = &priv->pcs_port_6.pcs;
1231			break;
1232		}
1233		break;
1234
1235	default:
1236		break;
1237	}
1238
1239	return pcs;
1240}
1241
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	/* Program the pad control register of a CPU port (0 or 6) for the
	 * requested xMII interface mode. Internal-PHY ports (1-5) need no
	 * MAC-side configuration and return early.
	 */
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6: /* 2nd CPU port / external PHY */
		/* Port 6 additionally supports 1000base-X, unlike port 0 */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	/* In-band negotiation is only supported on port 6 */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* QCA8337 requires to set rgmii rx delay for all ports.
		 * This is enabled through PORT5_PAD_CTRL for all ports,
		 * rather than individual port registers.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
1321
1322static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1323				   struct phylink_config *config)
1324{
1325	switch (port) {
1326	case 0: /* 1st CPU port */
1327		phy_interface_set_rgmii(config->supported_interfaces);
1328		__set_bit(PHY_INTERFACE_MODE_SGMII,
1329			  config->supported_interfaces);
1330		break;
1331
1332	case 1:
1333	case 2:
1334	case 3:
1335	case 4:
1336	case 5:
1337		/* Internal PHY */
1338		__set_bit(PHY_INTERFACE_MODE_GMII,
1339			  config->supported_interfaces);
1340		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1341			  config->supported_interfaces);
1342		break;
1343
1344	case 6: /* 2nd CPU port / external PHY */
1345		phy_interface_set_rgmii(config->supported_interfaces);
1346		__set_bit(PHY_INTERFACE_MODE_SGMII,
1347			  config->supported_interfaces);
1348		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
1349			  config->supported_interfaces);
1350		break;
1351	}
1352
1353	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1354		MAC_10 | MAC_100 | MAC_1000FD;
1355
1356	config->legacy_pre_march2020 = false;
1357}
1358
static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
			    phy_interface_t interface)
{
	struct qca8k_priv *priv = ds->priv;

	/* Disable the port MAC on link loss */
	qca8k_port_set_status(priv, port, 0);
}
1367
1368static void
1369qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1370			  phy_interface_t interface, struct phy_device *phydev,
1371			  int speed, int duplex, bool tx_pause, bool rx_pause)
1372{
1373	struct qca8k_priv *priv = ds->priv;
1374	u32 reg;
1375
1376	if (phylink_autoneg_inband(mode)) {
1377		reg = QCA8K_PORT_STATUS_LINK_AUTO;
1378	} else {
1379		switch (speed) {
1380		case SPEED_10:
1381			reg = QCA8K_PORT_STATUS_SPEED_10;
1382			break;
1383		case SPEED_100:
1384			reg = QCA8K_PORT_STATUS_SPEED_100;
1385			break;
1386		case SPEED_1000:
1387			reg = QCA8K_PORT_STATUS_SPEED_1000;
1388			break;
1389		default:
1390			reg = QCA8K_PORT_STATUS_LINK_AUTO;
1391			break;
1392		}
1393
1394		if (duplex == DUPLEX_FULL)
1395			reg |= QCA8K_PORT_STATUS_DUPLEX;
1396
1397		if (rx_pause || dsa_is_cpu_port(ds, port))
1398			reg |= QCA8K_PORT_STATUS_RXFLOW;
1399
1400		if (tx_pause || dsa_is_cpu_port(ds, port))
1401			reg |= QCA8K_PORT_STATUS_TXFLOW;
1402	}
1403
1404	reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1405
1406	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1407}
1408
/* Recover the driver's qca8k_pcs wrapper from the embedded phylink_pcs */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
1413
1414static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1415				struct phylink_link_state *state)
1416{
1417	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1418	int port = pcs_to_qca8k_pcs(pcs)->port;
1419	u32 reg;
1420	int ret;
1421
1422	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1423	if (ret < 0) {
1424		state->link = false;
1425		return;
1426	}
1427
1428	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1429	state->an_complete = state->link;
1430	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1431	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1432							   DUPLEX_HALF;
1433
1434	switch (reg & QCA8K_PORT_STATUS_SPEED) {
1435	case QCA8K_PORT_STATUS_SPEED_10:
1436		state->speed = SPEED_10;
1437		break;
1438	case QCA8K_PORT_STATUS_SPEED_100:
1439		state->speed = SPEED_100;
1440		break;
1441	case QCA8K_PORT_STATUS_SPEED_1000:
1442		state->speed = SPEED_1000;
1443		break;
1444	default:
1445		state->speed = SPEED_UNKNOWN;
1446		break;
1447	}
1448
1449	if (reg & QCA8K_PORT_STATUS_RXFLOW)
1450		state->pause |= MLO_PAUSE_RX;
1451	if (reg & QCA8K_PORT_STATUS_TXFLOW)
1452		state->pause |= MLO_PAUSE_TX;
1453}
1454
1455static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1456			    phy_interface_t interface,
1457			    const unsigned long *advertising,
1458			    bool permit_pause_to_mac)
1459{
1460	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1461	int cpu_port_index, ret, port;
1462	u32 reg, val;
1463
1464	port = pcs_to_qca8k_pcs(pcs)->port;
1465	switch (port) {
1466	case 0:
1467		reg = QCA8K_REG_PORT0_PAD_CTRL;
1468		cpu_port_index = QCA8K_CPU_PORT0;
1469		break;
1470
1471	case 6:
1472		reg = QCA8K_REG_PORT6_PAD_CTRL;
1473		cpu_port_index = QCA8K_CPU_PORT6;
1474		break;
1475
1476	default:
1477		WARN_ON(1);
1478		return -EINVAL;
1479	}
1480
1481	/* Enable/disable SerDes auto-negotiation as necessary */
1482	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
 
 
 
1483	if (ret)
1484		return ret;
1485	if (phylink_autoneg_inband(mode))
1486		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1487	else
1488		val |= QCA8K_PWS_SERDES_AEN_DIS;
1489	qca8k_write(priv, QCA8K_REG_PWS, val);
1490
1491	/* Configure the SGMII parameters */
1492	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1493	if (ret)
1494		return ret;
1495
1496	val |= QCA8K_SGMII_EN_SD;
1497
1498	if (priv->ports_config.sgmii_enable_pll)
1499		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1500		       QCA8K_SGMII_EN_TX;
1501
1502	if (dsa_is_cpu_port(priv->ds, port)) {
1503		/* CPU port, we're talking to the CPU MAC, be a PHY */
1504		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1505		val |= QCA8K_SGMII_MODE_CTRL_PHY;
1506	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1507		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1508		val |= QCA8K_SGMII_MODE_CTRL_MAC;
1509	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1510		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1511		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1512	}
1513
1514	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1515
1516	/* From original code is reported port instability as SGMII also
1517	 * require delay set. Apply advised values here or take them from DT.
1518	 */
1519	if (interface == PHY_INTERFACE_MODE_SGMII)
1520		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1521	/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
1522	 * falling edge is set writing in the PORT0 PAD reg
1523	 */
1524	if (priv->switch_id == QCA8K_ID_QCA8327 ||
1525	    priv->switch_id == QCA8K_ID_QCA8337)
1526		reg = QCA8K_REG_PORT0_PAD_CTRL;
1527
1528	val = 0;
1529
1530	/* SGMII Clock phase configuration */
1531	if (priv->ports_config.sgmii_rx_clk_falling_edge)
1532		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1533
1534	if (priv->ports_config.sgmii_tx_clk_falling_edge)
1535		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1536
1537	if (val)
1538		ret = qca8k_rmw(priv, reg,
1539				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1540				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1541				val);
1542
1543	return 0;
1544}
1545
/* No explicit AN-restart mechanism is implemented for this PCS;
 * phylink requires the hook, so provide an empty stub.
 */
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
1549
/* phylink PCS operations shared by the port-0 and port-6 SerDes PCS */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
1555
1556static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
1557			    int port)
1558{
1559	qpcs->pcs.ops = &qca8k_pcs_ops;
 
1560
1561	/* We don't have interrupts for link changes, so we need to poll */
1562	qpcs->pcs.poll = true;
1563	qpcs->priv = priv;
1564	qpcs->port = port;
1565}
1566
/* Tagger callback: parse a MIB autocast packet emitted by the switch and
 * fill priv->mib_eth_data.data for the port that requested the counters.
 * Called for every autocast frame; frames for other ports only decrement
 * the port_parsed refcount.
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	__le32 *data2;
	u8 port;
	int i;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocast every port. Ignore other packet and
	 * parse only the requested one.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	data2 = (__le32 *)skb->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* First 3 mib are present in the skb head */
		if (i < 3) {
			mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
			continue;
		}

		/* Some mib are 64 bit wide */
		if (mib->size == 2)
			mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
		else
			mib_eth_data->data[i] = get_unaligned_le32(data2);

		/* Advance by the counter's width in 32-bit words */
		data2 += mib->size;
	}

exit:
	/* Complete on receiving all the mib packet */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
1612
/* Fetch MIB counters for @port over the Ethernet management channel by
 * triggering a MIB autocast; results land in @data via the tagger
 * handler (qca8k_mib_autocast_handler).
 *
 * NOTE(review): on success this returns the positive value from
 * wait_for_completion_timeout() (remaining jiffies), 0 on timeout, or a
 * negative errno from the regmap write — callers appear to treat > 0 as
 * success; confirm against the autocast_mib call site.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	/* Serialize whole-request state against concurrent stat readers */
	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	/* One autocast frame is expected per switch port */
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send mib autocast request */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
1651
1652static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
1653{
1654	struct qca8k_priv *priv = ds->priv;
1655
1656	/* Communicate to the phy internal driver the switch revision.
1657	 * Based on the switch revision different values needs to be
1658	 * set to the dbg and mmd reg on the phy.
1659	 * The first 2 bit are used to communicate the switch revision
1660	 * to the phy driver.
1661	 */
1662	if (port > 0 && port < 6)
1663		return priv->switch_revision;
1664
1665	return 0;
1666}
1667
/* All ports use the QCA tagging protocol regardless of port or master */
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}
1674
/* Track whether the CPU-port-0 master netdev is operational; mgmt_master
 * gates the Ethernet-based register/MIB access paths.
 */
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
		    bool operational)
{
	struct dsa_port *dp = master->dsa_ptr;
	struct qca8k_priv *priv = ds->priv;

	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
	if (dp->index != 0)
		return;

	/* Take both channel locks so in-flight mgmt/MIB transactions see a
	 * consistent mgmt_master pointer.
	 */
	mutex_lock(&priv->mgmt_eth_data.mutex);
	mutex_lock(&priv->mib_eth_data.mutex);

	priv->mgmt_master = operational ? (struct net_device *)master : NULL;

	mutex_unlock(&priv->mib_eth_data.mutex);
	mutex_unlock(&priv->mgmt_eth_data.mutex);
}
1694
1695static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
1696				      enum dsa_tag_protocol proto)
1697{
1698	struct qca_tagger_data *tagger_data;
1699
1700	switch (proto) {
1701	case DSA_TAG_PROTO_QCA:
1702		tagger_data = ds->tagger_data;
1703
1704		tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
1705		tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
1706
1707		break;
1708	default:
1709		return -EOPNOTSUPP;
1710	}
1711
1712	return 0;
1713}
1714
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
static int
qca8k_setup(struct dsa_switch *ds)
{
	/* One-time switch bring-up called by the DSA core: resolve the CPU
	 * port, parse DT port config, set up MDIO/PWS/power-sel and PCS,
	 * then program per-port forwarding, VLAN defaults and (on qca8337)
	 * buffer/priority tuning. The register write order follows the
	 * original vendor sequence and should not be rearranged casually.
	 */
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, ret, i;
	u32 mask;

	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse CPU port config to be later used in phy_link mac_config */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	/* Register the PCS instances for the two SerDes-capable ports */
	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU Port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* Enable MIB counters */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;

		/* Enable QCA header mode on all cpu ports */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
			if (ret) {
				dev_err(priv->dev, "failed enabling QCA header mode");
				return ret;
			}
		}

		/* Disable MAC by default on all user ports */
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);
	}

	/* Forward all unknown frames to CPU port for Linux processing
	 * Notice that in multi-cpu config only one port should be set
	 * for igmp, unknown, multicast and broadcast packet
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* Setup connection between CPU port & user ports
	 * Configure specific switch configuration for ports
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Individual user ports get connected to CPU port only */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable ARP Auto-learning by default */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port based vlans to work we need to set the
			 * default egress vid
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}

		/* The port 5 of the qca8337 have some problem in flood condition. The
		 * original legacy driver had some specific buffer and priority settings
		 * for the different port suggested by the QCA switch team. Add this
		 * missing settings to improve switch stability under load condition.
		 * This problem is limited to qca8337 and other qca8k switch are not affected.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The 2 CPU port and port 5 requires some different
			 * priority than any other ports.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}
	}

	/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Setup our port MTUs to match power on defaults */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set min a max ageing value supported */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
1918
/* DSA switch operations; most port/bridge/FDB/VLAN handlers live in the
 * shared qca8k common code, while phylink/PCS and the Ethernet management
 * channel hooks are implemented in this file.
 */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol	= qca8k_get_tag_protocol,
	.setup			= qca8k_setup,
	.get_strings		= qca8k_get_strings,
	.get_ethtool_stats	= qca8k_get_ethtool_stats,
	.get_sset_count		= qca8k_get_sset_count,
	.set_ageing_time	= qca8k_set_ageing_time,
	.get_mac_eee		= qca8k_get_mac_eee,
	.set_mac_eee		= qca8k_set_mac_eee,
	.port_enable		= qca8k_port_enable,
	.port_disable		= qca8k_port_disable,
	.port_change_mtu	= qca8k_port_change_mtu,
	.port_max_mtu		= qca8k_port_max_mtu,
	.port_stp_state_set	= qca8k_port_stp_state_set,
	.port_bridge_join	= qca8k_port_bridge_join,
	.port_bridge_leave	= qca8k_port_bridge_leave,
	.port_fast_age		= qca8k_port_fast_age,
	.port_fdb_add		= qca8k_port_fdb_add,
	.port_fdb_del		= qca8k_port_fdb_del,
	.port_fdb_dump		= qca8k_port_fdb_dump,
	.port_mdb_add		= qca8k_port_mdb_add,
	.port_mdb_del		= qca8k_port_mdb_del,
	.port_mirror_add	= qca8k_port_mirror_add,
	.port_mirror_del	= qca8k_port_mirror_del,
	.port_vlan_filtering	= qca8k_port_vlan_filtering,
	.port_vlan_add		= qca8k_port_vlan_add,
	.port_vlan_del		= qca8k_port_vlan_del,
	.phylink_get_caps	= qca8k_phylink_get_caps,
	.phylink_mac_select_pcs	= qca8k_phylink_mac_select_pcs,
	.phylink_mac_config	= qca8k_phylink_mac_config,
	.phylink_mac_link_down	= qca8k_phylink_mac_link_down,
	.phylink_mac_link_up	= qca8k_phylink_mac_link_up,
	.get_phy_flags		= qca8k_get_phy_flags,
	.port_lag_join		= qca8k_port_lag_join,
	.port_lag_leave		= qca8k_port_lag_leave,
	.master_state_change	= qca8k_master_change,
	.connect_tag_protocol	= qca8k_connect_tag_protocol,
};
1957
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	/* MDIO probe: allocate private state, pulse the optional reset GPIO,
	 * set up the regmap, verify the switch ID, then register with DSA.
	 * All allocations are devm-managed.
	 */
	struct qca8k_priv *priv;
	int ret;

	/* allocate the private data struct so that we can probe the switches
	 * ID register
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	priv->info = of_device_get_match_data(priv->dev);

	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		/* The active low duration must be greater than 10 ms
		 * and checkpatch.pl wants 20 ms.
		 */
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* Invalid page marker forces a page select on first MDIO access */
	priv->mdio_cache.page = 0xffff;

	/* Check the detected switch id */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	/* Ethernet-channel state must exist before the tagger can use it */
	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = &qca8k_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
2023
static void
qca8k_sw_remove(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
	int i;

	/* drvdata may be NULL if shutdown already ran (it clears it) */
	if (!priv)
		return;

	/* Quiesce all port MACs before tearing down the DSA switch */
	for (i = 0; i < QCA8K_NUM_PORTS; i++)
		qca8k_port_set_status(priv, i, 0);

	dsa_unregister_switch(priv->ds);
}
2038
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);

	/* Clear drvdata so a subsequent remove() becomes a no-op */
	dev_set_drvdata(&mdiodev->dev, NULL);
}
2050
2051#ifdef CONFIG_PM_SLEEP
2052static void
2053qca8k_set_pm(struct qca8k_priv *priv, int enable)
2054{
2055	int port;
2056
2057	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
2058		/* Do not enable on resume if the port was
2059		 * disabled before.
2060		 */
2061		if (!(priv->port_enabled_map & BIT(port)))
2062			continue;
2063
2064		qca8k_port_set_status(priv, port, enable);
2065	}
2066}
2067
static int qca8k_suspend(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	/* Disable all enabled port MACs, then let DSA suspend the switch */
	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);
}
2076
static int qca8k_resume(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	/* Re-enable the ports that were enabled before suspend */
	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
}
2085#endif /* CONFIG_PM_SLEEP */
2086
/* System sleep PM hooks (see qca8k_suspend/qca8k_resume above) */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);
2089
/* Chip-family ops: Ethernet-channel register access and MIB autocast */
static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
	.read_eth = qca8k_read_eth,
	.write_eth = qca8k_write_eth,
};
2095
/* Per-compatible match data: switch ID, package variant and MIB layout.
 * qca8327 and qca8328 share the same silicon ID; qca8327 is the reduced
 * (smaller) package.
 */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

/* Covers both qca8334 and qca8337 */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2114
/* Device-tree compatibles handled by this driver */
static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
2122
/* MDIO driver glue binding probe/remove/shutdown and PM ops */
static struct mdio_driver qca8kmdio_driver = {
	.probe  = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};
2133
/* Module registration and metadata */
mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");