// SPDX-License-Identifier: GPL-2.0+
/*
 * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
 *
 * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
 */

#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/oa_tc6.h>

/* OPEN Alliance TC6 registers */
/* Standard Capabilities Register */
#define OA_TC6_REG_STDCAP			0x0002
#define STDCAP_DIRECT_PHY_REG_ACCESS		BIT(8)

/* Reset Control and Status Register */
#define OA_TC6_REG_RESET			0x0003
#define RESET_SWRESET				BIT(0)	/* Software Reset */

/* Configuration Register #0 */
#define OA_TC6_REG_CONFIG0			0x0004
#define CONFIG0_SYNC				BIT(15)
#define CONFIG0_ZARFE_ENABLE			BIT(12)

/* Status Register #0 */
#define OA_TC6_REG_STATUS0			0x0008
#define STATUS0_RESETC				BIT(6)	/* Reset Complete */
#define STATUS0_HEADER_ERROR			BIT(5)
#define STATUS0_LOSS_OF_FRAME_ERROR		BIT(4)
#define STATUS0_RX_BUFFER_OVERFLOW_ERROR	BIT(3)
#define STATUS0_TX_PROTOCOL_ERROR		BIT(0)

/* Buffer Status Register */
#define OA_TC6_REG_BUFFER_STATUS		0x000B
#define BUFFER_STATUS_TX_CREDITS_AVAILABLE	GENMASK(15, 8)
#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE	GENMASK(7, 0)

/* Interrupt Mask Register #0 */
#define OA_TC6_REG_INT_MASK0			0x000C
#define INT_MASK0_HEADER_ERR_MASK		BIT(5)
#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK	BIT(4)
#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK	BIT(3)
#define INT_MASK0_TX_PROTOCOL_ERR_MASK		BIT(0)

/* PHY Clause 22 registers base address and mask */
#define OA_TC6_PHY_STD_REG_ADDR_BASE		0xFF00
#define OA_TC6_PHY_STD_REG_ADDR_MASK		0x1F

/* Control command header */
#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ	BIT(29)
#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR	GENMASK(27, 24)
#define OA_TC6_CTRL_HEADER_ADDR			GENMASK(23, 8)
#define OA_TC6_CTRL_HEADER_LENGTH		GENMASK(7, 1)
#define OA_TC6_CTRL_HEADER_PARITY		BIT(0)

/* Data header */
#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_DATA_HEADER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_HEADER_START_VALID		BIT(20)
#define OA_TC6_DATA_HEADER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_HEADER_END_VALID		BIT(14)
#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_HEADER_PARITY		BIT(0)

/* Data footer */
#define OA_TC6_DATA_FOOTER_EXTENDED_STS		BIT(31)
#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD	BIT(30)
#define OA_TC6_DATA_FOOTER_CONFIG_SYNC		BIT(29)
#define OA_TC6_DATA_FOOTER_RX_CHUNKS		GENMASK(28, 24)
#define OA_TC6_DATA_FOOTER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_FOOTER_START_VALID		BIT(20)
#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_FOOTER_END_VALID		BIT(14)
#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_FOOTER_TX_CREDITS		GENMASK(5, 1)

/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
 * OPEN Alliance specification.
 */
#define OA_TC6_PHY_C45_PCS_MMS2			2	/* MMD 3 */
#define OA_TC6_PHY_C45_PMA_PMD_MMS3		3	/* MMD 1 */
#define OA_TC6_PHY_C45_VS_PLCA_MMS4		4	/* MMD 31 */
#define OA_TC6_PHY_C45_AUTO_NEG_MMS5		5	/* MMD 7 */
#define OA_TC6_PHY_C45_POWER_UNIT_MMS6		6	/* MMD 13 */

#define OA_TC6_CTRL_HEADER_SIZE			4
#define OA_TC6_CTRL_REG_VALUE_SIZE		4
#define OA_TC6_CTRL_IGNORED_SIZE		4
#define OA_TC6_CTRL_MAX_REGISTERS		128
#define OA_TC6_CTRL_SPI_BUF_SIZE		(OA_TC6_CTRL_HEADER_SIZE +\
						(OA_TC6_CTRL_MAX_REGISTERS *\
						OA_TC6_CTRL_REG_VALUE_SIZE) +\
						OA_TC6_CTRL_IGNORED_SIZE)
#define OA_TC6_CHUNK_PAYLOAD_SIZE		64
#define OA_TC6_DATA_HEADER_SIZE			4
#define OA_TC6_CHUNK_SIZE			(OA_TC6_DATA_HEADER_SIZE +\
						OA_TC6_CHUNK_PAYLOAD_SIZE)
#define OA_TC6_MAX_TX_CHUNKS			48
#define OA_TC6_SPI_DATA_BUF_SIZE		(OA_TC6_MAX_TX_CHUNKS *\
						OA_TC6_CHUNK_SIZE)
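/* Size arithmetic: each data chunk is a 4-byte header followed by a 64-byte
 * payload, so OA_TC6_CHUNK_SIZE is 68 bytes and the SPI data buffers above
 * hold up to 48 * 68 = 3264 bytes per transfer.
 */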
#define STATUS0_RESETC_POLL_DELAY		1000
#define STATUS0_RESETC_POLL_TIMEOUT		1000000

/* Internal structure for MAC-PHY drivers */
struct oa_tc6 {
	struct device *dev;
	struct net_device *netdev;
	struct phy_device *phydev;
	struct mii_bus *mdiobus;
	struct spi_device *spi;
	struct mutex spi_ctrl_lock; /* Protects spi control transfer */
	spinlock_t tx_skb_lock; /* Protects tx skb handling */
	void *spi_ctrl_tx_buf;
	void *spi_ctrl_rx_buf;
	void *spi_data_tx_buf;
	void *spi_data_rx_buf;
	struct sk_buff *ongoing_tx_skb;
	struct sk_buff *waiting_tx_skb;
	struct sk_buff *rx_skb;
	struct task_struct *spi_thread;
	wait_queue_head_t spi_wq;
	u16 tx_skb_offset;
	u16 spi_data_tx_buf_offset;
	u16 tx_credits;
	u8 rx_chunks_available;
	bool rx_buf_overflow;
	bool int_flag;
};

enum oa_tc6_header_type {
	OA_TC6_CTRL_HEADER,
	OA_TC6_DATA_HEADER,
};

enum oa_tc6_register_op {
	OA_TC6_CTRL_REG_READ = 0,
	OA_TC6_CTRL_REG_WRITE = 1,
};

enum oa_tc6_data_valid_info {
	OA_TC6_DATA_INVALID,
	OA_TC6_DATA_VALID,
};

enum oa_tc6_data_start_valid_info {
	OA_TC6_DATA_START_INVALID,
	OA_TC6_DATA_START_VALID,
};

enum oa_tc6_data_end_valid_info {
	OA_TC6_DATA_END_INVALID,
	OA_TC6_DATA_END_VALID,
};

static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
			       enum oa_tc6_header_type header_type, u16 length)
{
	struct spi_transfer xfer = { 0 };
	struct spi_message msg;

	if (header_type == OA_TC6_DATA_HEADER) {
		xfer.tx_buf = tc6->spi_data_tx_buf;
		xfer.rx_buf = tc6->spi_data_rx_buf;
	} else {
		xfer.tx_buf = tc6->spi_ctrl_tx_buf;
		xfer.rx_buf = tc6->spi_ctrl_rx_buf;
	}
	xfer.len = length;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(tc6->spi, &msg);
}

static int oa_tc6_get_parity(u32 p)
{
	/* Public domain code snippet, lifted from
	 * http://www-graphics.stanford.edu/~seander/bithacks.html
	 */
	p ^= p >> 1;
	p ^= p >> 2;
	p = (p & 0x11111111U) * 0x11111111U;

	/* Odd parity is used here */
	return !((p >> 28) & 1);
}
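/* Note: oa_tc6_get_parity() returns 1 when @p has an even number of set bits,
 * which is exactly the value the parity bit must take so that the completed
 * 32-bit header carries odd parity. For example, p = 0x3 (two set bits)
 * yields 1, while p = 0x1 yields 0.
 */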

static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
					 enum oa_tc6_register_op reg_op)
{
	u32 header;

	header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
			    OA_TC6_CTRL_HEADER) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
	header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}
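/* Worked example (illustrative): a single-register read of OA_TC6_REG_CONFIG0
 * (0x0004) in MMS 0 gives DNC = 0, WNR = 0, MMS = 0, ADDR = 0x0004 and
 * LEN = length - 1 = 0, i.e. 0x00000400 before parity. That value already has
 * an odd number of set bits, so P = 0 and the header sent on the wire is
 * cpu_to_be32(0x00000400).
 */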

static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
					  u8 length)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		*tx_buf++ = cpu_to_be32(value[i]);
}

static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
{
	/* A control command consists of a 4-byte header, a 4-byte value for
	 * each register and a trailing 4-byte ignored field.
	 */
	return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
	       OA_TC6_CTRL_IGNORED_SIZE;
}

static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
					u32 value[], u8 length,
					enum oa_tc6_register_op reg_op)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf;

	*tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);

	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		oa_tc6_update_ctrl_write_data(tc6, value, length);
}

static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
{
	u8 *tx_buf = tc6->spi_ctrl_tx_buf;
	u8 *rx_buf = tc6->spi_ctrl_rx_buf;

	rx_buf += OA_TC6_CTRL_IGNORED_SIZE;

	/* The echoed control write must match the one that was transmitted */
	if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
		return -EPROTO;

	return 0;
}

static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
{
	u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
	u32 *tx_buf = tc6->spi_ctrl_tx_buf;

	/* The echoed control read header must match the one that was
	 * transmitted.
	 */
	if (*tx_buf != *rx_buf)
		return -EPROTO;

	return 0;
}

static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
				       u8 length)
{
	__be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
			 OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		value[i] = be32_to_cpu(*rx_buf++);
}

static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
			       u8 length, enum oa_tc6_register_op reg_op)
{
	u16 size;
	int ret;

	/* Prepare control command and copy to SPI control buffer */
	oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);

	size = oa_tc6_calculate_ctrl_buf_size(length);

	/* Perform SPI transfer */
	ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
	if (ret) {
		dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
			ret);
		return ret;
	}

	/* Check echoed/received control write command reply for errors */
	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		return oa_tc6_check_ctrl_write_reply(tc6, size);

	/* Check echoed/received control read command reply for errors */
	ret = oa_tc6_check_ctrl_read_reply(tc6, size);
	if (ret)
		return ret;

	oa_tc6_copy_ctrl_read_data(tc6, value, length);

	return 0;
}

/**
 * oa_tc6_read_registers - function for reading multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be read in the MAC-PHY.
 * @value: array to store the values read starting at register @address.
 * @length: number of consecutive registers to be read from @address.
 *
 * A maximum of 128 consecutive registers can be read starting at @address.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			  u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_READ);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_read_registers);

/**
 * oa_tc6_read_register - function for reading a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be read.
 * @value: value read from the @address register of the MAC-PHY.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
{
	return oa_tc6_read_registers(tc6, address, value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_read_register);
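/* Usage sketch (illustrative, not part of this file): a MAC driver holding
 * the handle returned by oa_tc6_init() ("tc6" below) could read the standard
 * capabilities register like this:
 *
 *	u32 regval;
 *	int ret;
 *
 *	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
 *	if (ret)
 *		return ret;
 */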

/**
 * oa_tc6_write_registers - function for writing multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be written in the MAC-PHY.
 * @value: values to be written starting at register address @address.
 * @length: number of consecutive registers to be written from @address.
 *
 * A maximum of 128 consecutive registers can be written starting at @address.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			   u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_WRITE);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_write_registers);

/**
 * oa_tc6_write_register - function for writing a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be written.
 * @value: value to be written at the @address register of the MAC-PHY.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
{
	return oa_tc6_write_registers(tc6, address, &value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_write_register);
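/* Usage sketch (illustrative): up to 128 consecutive registers can be written
 * in a single control transaction. The register choice below is hypothetical:
 *
 *	u32 values[2] = { 0x0, 0x1 };
 *	int ret;
 *
 *	ret = oa_tc6_write_registers(tc6, OA_TC6_REG_CONFIG0, values, 2);
 *	if (ret)
 *		return ret;
 */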

static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
	if (ret)
		return ret;

	if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
		return -ENODEV;

	return 0;
}

static void oa_tc6_handle_link_change(struct net_device *netdev)
{
	phy_print_status(netdev->phydev);
}

static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				   (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				   &regval);
	if (ret)
		return ret;

	return regval;
}

static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
				u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;

	return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				     (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				     val);
}

static int oa_tc6_get_phy_c45_mms(int devnum)
{
	switch (devnum) {
	case MDIO_MMD_PCS:
		return OA_TC6_PHY_C45_PCS_MMS2;
	case MDIO_MMD_PMAPMD:
		return OA_TC6_PHY_C45_PMA_PMD_MMS3;
	case MDIO_MMD_VEND2:
		return OA_TC6_PHY_C45_VS_PLCA_MMS4;
	case MDIO_MMD_AN:
		return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
	case MDIO_MMD_POWER_UNIT:
		return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
	default:
		return -EOPNOTSUPP;
	}
}
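/* Example (from the mapping above): a Clause 45 access to a register in
 * MDIO_MMD_PCS, say regnum 0x8F00, maps to MMS 2, so the address passed to
 * the control helpers below is (2 << 16) | 0x8F00 = 0x00028F00.
 */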

static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
				   int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
	if (ret)
		return ret;

	return regval;
}

static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
				    int regnum, u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
}

static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
{
	int ret;

	tc6->mdiobus = mdiobus_alloc();
	if (!tc6->mdiobus) {
		netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
		return -ENOMEM;
	}

	tc6->mdiobus->priv = tc6;
	tc6->mdiobus->read = oa_tc6_mdiobus_read;
	tc6->mdiobus->write = oa_tc6_mdiobus_write;
	/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs expose both C22 and C45
	 * register spaces. If the PHY is discovered via the C22 bus protocol,
	 * the framework assumes C22 and always accesses C45 registers
	 * indirectly through C22 registers, because there is no clean
	 * separation between the C22/C45 register spaces and the C22/C45 MDIO
	 * bus protocols. As a result, direct access to the PHY's C45
	 * registers, which would save multiple SPI bus accesses, cannot be
	 * used by default. To get direct access, PHY drivers can set
	 * .read_mmd/.write_mmd to call .read_c45/.write_c45.
	 * Ex: drivers/net/phy/microchip_t1s.c
	 */
	tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
	tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
	tc6->mdiobus->name = "oa-tc6-mdiobus";
	tc6->mdiobus->parent = tc6->dev;

	snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
		 dev_name(&tc6->spi->dev));

	ret = mdiobus_register(tc6->mdiobus);
	if (ret) {
		netdev_err(tc6->netdev, "Could not register MDIO bus\n");
		mdiobus_free(tc6->mdiobus);
		return ret;
	}

	return 0;
}

static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
{
	mdiobus_unregister(tc6->mdiobus);
	mdiobus_free(tc6->mdiobus);
}

static int oa_tc6_phy_init(struct oa_tc6 *tc6)
{
	int ret;

	ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
	if (ret) {
		netdev_err(tc6->netdev,
			   "Direct PHY register access is not supported by the MAC-PHY\n");
		return ret;
	}

	ret = oa_tc6_mdiobus_register(tc6);
	if (ret)
		return ret;

	tc6->phydev = phy_find_first(tc6->mdiobus);
	if (!tc6->phydev) {
		netdev_err(tc6->netdev, "No PHY found\n");
		oa_tc6_mdiobus_unregister(tc6);
		return -ENODEV;
	}

	tc6->phydev->is_internal = true;
	ret = phy_connect_direct(tc6->netdev, tc6->phydev,
				 &oa_tc6_handle_link_change,
				 PHY_INTERFACE_MODE_INTERNAL);
	if (ret) {
		netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
			   tc6->mdiobus->id);
		oa_tc6_mdiobus_unregister(tc6);
		return ret;
	}

	phy_attached_info(tc6->netdev->phydev);

	return 0;
}

static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
{
	phy_disconnect(tc6->phydev);
	oa_tc6_mdiobus_unregister(tc6);
}

static int oa_tc6_read_status0(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
	if (ret) {
		dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
			ret);
		return 0;
	}

	return regval;
}

static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
{
	u32 regval = RESET_SWRESET;
	int ret;

	ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
	if (ret)
		return ret;

	/* Poll every 1ms, for up to 1s, for the soft reset to complete */
	ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
				 regval & STATUS0_RESETC,
				 STATUS0_RESETC_POLL_DELAY,
				 STATUS0_RESETC_POLL_TIMEOUT);
	if (ret)
		return -ENODEV;

	/* Clear the reset complete status */
	return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
}

static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
	if (ret)
		return ret;

	regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
		    INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
		    INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
		    INT_MASK0_HEADER_ERR_MASK);

	return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
}

static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
	if (ret)
		return ret;

	/* Enable configuration synchronization for data transfer */
	value |= CONFIG0_SYNC;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
}

static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
{
	if (tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		kfree_skb(tc6->rx_skb);
		tc6->rx_skb = NULL;
	}
}

static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
{
	if (tc6->ongoing_tx_skb) {
		tc6->netdev->stats.tx_dropped++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}
}

static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
			   ret);
		return ret;
	}

	/* Clear the error interrupts status */
	ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
			   ret);
		return ret;
	}

	if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
		tc6->rx_buf_overflow = true;
		oa_tc6_cleanup_ongoing_rx_skb(tc6);
		net_err_ratelimited("%s: Receive buffer overflow error\n",
				    tc6->netdev->name);
		return -EAGAIN;
	}
	if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
		netdev_err(tc6->netdev, "Transmit protocol error\n");
		return -ENODEV;
	}
	/* TODO: Currently loss of frame and header errors are treated as
	 * non-recoverable errors. They will be handled in the next version.
	 */
	if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
		netdev_err(tc6->netdev, "Loss of frame error\n");
		return -ENODEV;
	}
	if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
		netdev_err(tc6->netdev, "Header error\n");
		return -ENODEV;
	}

	return 0;
}

static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
{
	/* Process the rx chunk footer for the following:
	 * 1. tx credits
	 * 2. errors, if any, reported by the MAC-PHY
	 * 3. receive chunks available
	 */
	tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
	tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
					     footer);

	if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
		int ret = oa_tc6_process_extended_status(tc6);

		if (ret)
			return ret;
	}

	/* TODO: Currently received header bad and configuration unsync errors
	 * are treated as non-recoverable errors. They will be handled in the
	 * next version.
	 */
	if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
		netdev_err(tc6->netdev, "Rxd header bad error\n");
		return -ENODEV;
	}

	if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
		netdev_err(tc6->netdev, "Config unsync error\n");
		return -ENODEV;
	}

	return 0;
}

static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
	tc6->netdev->stats.rx_packets++;
	tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;

	netif_rx(tc6->rx_skb);

	tc6->rx_skb = NULL;
}

static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
{
	memcpy(skb_put(tc6->rx_skb, length), payload, length);
}

static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
						ETH_HLEN + ETH_FCS_LEN);
	if (!tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		return -ENOMEM;
	}

	return 0;
}

static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);

	return 0;
}

static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	return 0;
}

static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);
}

static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u32 footer)
{
	oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
}

static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
					u32 footer)
{
	u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
					 footer) * sizeof(u32);
	u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
				       footer);
	bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
	bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
	u16 size;

	/* Restart a new rx frame after an rx buffer overflow error */
	if (start_valid && tc6->rx_buf_overflow)
		tc6->rx_buf_overflow = false;

	if (tc6->rx_buf_overflow)
		return 0;

	/* Process a chunk containing a complete rx frame */
	if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
		size = end_byte_offset + 1 - start_byte_offset;
		return oa_tc6_prcs_complete_rx_frame(tc6,
						     &data[start_byte_offset],
						     size);
	}

	/* Process a chunk containing only an rx frame start */
	if (start_valid && !end_valid) {
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process a chunk containing only an rx frame end */
	if (end_valid && !start_valid) {
		size = end_byte_offset + 1;
		oa_tc6_prcs_rx_frame_end(tc6, data, size);
		return 0;
	}

	/* Process a chunk containing the previous rx frame's end and the next
	 * rx frame's start.
	 */
	if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
		/* After an rx buffer overflow error, the end of a previously
		 * incomplete rx frame may arrive together with the start of a
		 * new rx frame.
		 */
		if (tc6->rx_skb) {
			size = end_byte_offset + 1;
			oa_tc6_prcs_rx_frame_end(tc6, data, size);
		}
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process a chunk with ongoing rx frame data */
	oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);

	return 0;
}

static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
{
	u8 *rx_buf = tc6->spi_data_rx_buf;
	__be32 footer;

	footer = *((__be32 *)&rx_buf[footer_offset]);

	return be32_to_cpu(footer);
}

static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
{
	u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
	u32 footer;
	int ret;

	/* All the rx chunks in the receive SPI data buffer are examined here */
	for (int i = 0; i < no_of_rx_chunks; i++) {
		/* The last 4 bytes of each received chunk contain the footer
		 * info.
		 */
		footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
						    OA_TC6_CHUNK_PAYLOAD_SIZE);

		ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
		if (ret)
			return ret;

		/* If the chunk contains valid data, process it for the
		 * information needed to determine the validity and the
		 * location of the receive frame data.
		 */
		if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
			u8 *payload = tc6->spi_data_rx_buf + i *
				      OA_TC6_CHUNK_SIZE;

			ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
							   footer);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
					 bool end_valid, u8 end_byte_offset)
{
	u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
				OA_TC6_DATA_HEADER) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
				end_byte_offset);

	header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}
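/* Worked example (illustrative): a chunk carrying the middle of an ongoing
 * frame has DNC = 1, DV = 1, SV = 0, EV = 0 and EBO = 0, i.e. 0x80200000
 * before parity. That value has an even number of set bits, so P = 1 and the
 * header sent is cpu_to_be32(0x80200001), odd parity overall.
 */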

static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
{
	enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
	__be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
	u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
	u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
	enum oa_tc6_data_start_valid_info start_valid;
	u8 end_byte_offset = 0;
	u16 length_to_copy;

	/* The initial value is assigned here to keep the declaration within
	 * 80 characters.
	 */
	start_valid = OA_TC6_DATA_START_INVALID;

	/* Set start valid if the current tx chunk contains the start of the tx
	 * ethernet frame.
	 */
	if (!tc6->tx_skb_offset)
		start_valid = OA_TC6_DATA_START_VALID;

	/* If the remaining tx skb length is more than the chunk payload size
	 * of 64 bytes, copy only 64 bytes and leave the ongoing tx skb for the
	 * next tx chunk.
	 */
	length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);

	/* Copy the tx skb data to the tx chunk payload buffer */
	memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
	tc6->tx_skb_offset += length_to_copy;

	/* Set end valid if the current tx chunk contains the end of the tx
	 * ethernet frame.
	 */
	if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
		end_valid = OA_TC6_DATA_END_VALID;
		end_byte_offset = length_to_copy - 1;
		tc6->tx_skb_offset = 0;
		tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
		tc6->netdev->stats.tx_packets++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}

	*tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
					     end_valid, end_byte_offset);
	tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
}

static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
{
	u16 used_tx_credits;

	/* Get tx skbs and convert them into tx chunks based on the tx credits
	 * available.
	 */
	for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
	     used_tx_credits++) {
		if (!tc6->ongoing_tx_skb) {
			spin_lock_bh(&tc6->tx_skb_lock);
			tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
			tc6->waiting_tx_skb = NULL;
			spin_unlock_bh(&tc6->tx_skb_lock);
		}
		if (!tc6->ongoing_tx_skb)
			break;
		oa_tc6_add_tx_skb_to_spi_buf(tc6);
	}

	return used_tx_credits * OA_TC6_CHUNK_SIZE;
}

static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
					       u16 needed_empty_chunks)
{
	__be32 header;

	header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
					    OA_TC6_DATA_START_INVALID,
					    OA_TC6_DATA_END_INVALID, 0);

	while (needed_empty_chunks--) {
		__be32 *tx_buf = tc6->spi_data_tx_buf +
				 tc6->spi_data_tx_buf_offset;

		*tx_buf = header;
		tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
	}
}

static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
{
	u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
	u16 needed_empty_chunks;

	/* If there are more chunks to receive than to transmit, we need to add
	 * enough empty tx chunks to allow the reception of the excess rx
	 * chunks.
	 */
	if (tx_chunks >= tc6->rx_chunks_available)
		return len;

	needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;

	oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);

	return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
}

static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
{
	int ret;

	while (true) {
		u16 spi_len = 0;

		tc6->spi_data_tx_buf_offset = 0;

		if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
			spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);

		spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);

		if (tc6->int_flag) {
			tc6->int_flag = false;
			if (spi_len == 0) {
				oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
				spi_len = OA_TC6_CHUNK_SIZE;
			}
		}

		if (spi_len == 0)
			break;

		ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
		if (ret) {
			netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
				   ret);
			return ret;
		}

		ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
		if (ret) {
			if (ret == -EAGAIN)
				continue;

			oa_tc6_cleanup_ongoing_tx_skb(tc6);
			oa_tc6_cleanup_ongoing_rx_skb(tc6);
			netdev_err(tc6->netdev, "Device error: %d\n", ret);
			return ret;
		}

		if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
			netif_wake_queue(tc6->netdev);
	}

	return 0;
}

static int oa_tc6_spi_thread_handler(void *data)
{
	struct oa_tc6 *tc6 = data;
	int ret;

	while (likely(!kthread_should_stop())) {
		/* This kthread is woken up when there is a tx skb or a MAC-PHY
		 * interrupt, to perform an SPI transfer with tx chunks.
		 */
		wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
					 (tc6->waiting_tx_skb &&
					 tc6->tx_credits) ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		ret = oa_tc6_try_spi_transfer(tc6);
		if (ret)
			return ret;
	}

	return 0;
}

static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	/* Initially the tx credits and rx chunks available are read from the
	 * register, as no data transfer has been performed yet. Later they are
	 * updated from the rx footer.
	 */
	ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
	if (ret)
		return ret;

	tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
	tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
					     value);

	return 0;
}

static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
{
	struct oa_tc6 *tc6 = data;

	/* MAC-PHY interrupt can occur for the following reasons.
	 * - availability of tx credits if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - availability of rx chunks if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - extended status event not reported in the previous rx footer.
	 */
	tc6->int_flag = true;
	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return IRQ_HANDLED;
}

/**
 * oa_tc6_zero_align_receive_frame_enable - function to enable the zero align
 * receive frame feature.
 * @tc6: oa_tc6 struct.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
	if (ret)
		return ret;

	/* Set Zero-Align Receive Frame Enable */
	regval |= CONFIG0_ZARFE_ENABLE;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
}
EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
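/* Usage sketch (illustrative): a MAC driver may call this once after
 * oa_tc6_init() succeeds if it wants received frames to start at the
 * beginning of a chunk payload (start word offset zero):
 *
 *	ret = oa_tc6_zero_align_receive_frame_enable(tc6);
 *	if (ret)
 *		goto err_exit;
 */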

/**
 * oa_tc6_start_xmit - function for sending a tx skb containing an Ethernet
 * frame.
 * @tc6: oa_tc6 struct.
 * @skb: socket buffer in which the Ethernet frame is stored.
 *
 * Return: NETDEV_TX_OK if the transmit Ethernet frame skb was accepted for
 * transmission, otherwise NETDEV_TX_BUSY.
 */
netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
{
	if (tc6->waiting_tx_skb) {
		netif_stop_queue(tc6->netdev);
		return NETDEV_TX_BUSY;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		tc6->netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_bh(&tc6->tx_skb_lock);
	tc6->waiting_tx_skb = skb;
	spin_unlock_bh(&tc6->tx_skb_lock);

	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
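/* Usage sketch (illustrative): a MAC driver typically forwards its
 * .ndo_start_xmit to this helper. "struct my_priv" and its tc6 member are
 * hypothetical:
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *netdev)
 *	{
 *		struct my_priv *priv = netdev_priv(netdev);
 *
 *		return oa_tc6_start_xmit(priv->tc6, skb);
 *	}
 */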

/**
 * oa_tc6_init - allocates and initializes oa_tc6 structure.
 * @spi: device with which data will be exchanged.
 * @netdev: network device interface structure.
 *
 * Return: pointer to the oa_tc6 structure on successful MAC-PHY
 * initialization, otherwise NULL.
 */
struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
{
	struct oa_tc6 *tc6;
	int ret;

	tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
	if (!tc6)
		return NULL;

	tc6->spi = spi;
	tc6->netdev = netdev;
	SET_NETDEV_DEV(netdev, &spi->dev);
	mutex_init(&tc6->spi_ctrl_lock);
	spin_lock_init(&tc6->tx_skb_lock);

	/* Set the SPI controller to pump at realtime priority */
	tc6->spi->rt = true;
	spi_setup(tc6->spi);

	tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_tx_buf)
		return NULL;

	tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_rx_buf)
		return NULL;

	tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_tx_buf)
		return NULL;

	tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_rx_buf)
		return NULL;

	ret = oa_tc6_sw_reset_macphy(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY software reset failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY error interrupts unmask failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_phy_init(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC internal PHY initialization failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_enable_data_transfer(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
			ret);
		goto phy_exit;
	}

	ret = oa_tc6_update_buffer_status_from_register(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"Failed to update buffer status: %d\n", ret);
		goto phy_exit;
	}

	init_waitqueue_head(&tc6->spi_wq);

	tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
				      "oa-tc6-spi-thread");
	if (IS_ERR(tc6->spi_thread)) {
		dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
		goto phy_exit;
	}

	sched_set_fifo(tc6->spi_thread);

	ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
			       IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
			       tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
			ret);
		goto kthread_stop;
	}
	/* oa_tc6_sw_reset_macphy() resets the MAC-PHY and clears its reset
	 * complete status. The IRQ is also asserted on reset completion and
	 * remains asserted until the MAC-PHY receives a data chunk, so
	 * transmitting an empty data chunk deasserts the IRQ. Refer to
	 * sections 7.7 and 9.2.8.8 in the OPEN Alliance specification for
	 * more details.
	 */
	tc6->int_flag = true;
	wake_up_interruptible(&tc6->spi_wq);

	return tc6;

kthread_stop:
	kthread_stop(tc6->spi_thread);
phy_exit:
	oa_tc6_phy_exit(tc6);
	return NULL;
}
EXPORT_SYMBOL_GPL(oa_tc6_init);

/**
 * oa_tc6_exit - exit function.
 * @tc6: oa_tc6 struct.
 */
void oa_tc6_exit(struct oa_tc6 *tc6)
{
	oa_tc6_phy_exit(tc6);
	kthread_stop(tc6->spi_thread);
	dev_kfree_skb_any(tc6->ongoing_tx_skb);
	dev_kfree_skb_any(tc6->waiting_tx_skb);
	dev_kfree_skb_any(tc6->rx_skb);
}
EXPORT_SYMBOL_GPL(oa_tc6_exit);
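/* Usage sketch (illustrative): pairing oa_tc6_init()/oa_tc6_exit() in a SPI
 * driver's probe/remove path. Error handling is trimmed and "priv" is
 * hypothetical:
 *
 *	static int my_probe(struct spi_device *spi)
 *	{
 *		...
 *		priv->tc6 = oa_tc6_init(spi, netdev);
 *		if (!priv->tc6)
 *			return -ENODEV;
 *		...
 *	}
 *
 *	static void my_remove(struct spi_device *spi)
 *	{
 *		...
 *		oa_tc6_exit(priv->tc6);
 *		...
 *	}
 */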

MODULE_DESCRIPTION("OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface Lib");
MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
MODULE_LICENSE("GPL");