// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010 ASIX Electronics Corporation
 * Copyright (c) 2020 Samsung Electronics Co., Ltd.
 *
 * ASIX AX88796C SPI Fast Ethernet Linux driver
 */

#define pr_fmt(fmt)	"ax88796c: " fmt

#include "ax88796c_main.h"
#include "ax88796c_ioctl.h"

#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/lockdep.h>
#include <linux/mdio.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>

static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);
static int msg_enable = NETIF_MSG_PROBE |
			NETIF_MSG_LINK |
			NETIF_MSG_RX_ERR |
			NETIF_MSG_TX_ERR;

static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000";
unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)];

module_param(msg_enable, int, 0444);
MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)");

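/* Soft-reset the MAC over SPI: pulse PSR_RESET in P0_PSR, poll until
 * PSR_DEV_READY comes back (roughly a 160 ms budget, judging by the
 * jiffies_to_usecs() conversion below), then set or clear the SPI
 * compression bits (SPICR_RCEN/SPICR_QCEN) in P4_SPICR according to
 * AX_CAP_COMP.
 */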
static int ax88796c_soft_reset(struct ax88796c_device *ax_local)
{
	u16 temp;
	int ret;

	lockdep_assert_held(&ax_local->spi_lock);

	AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR);
	AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR);

	ret = read_poll_timeout(AX_READ, ret,
				(ret & PSR_DEV_READY),
				0, jiffies_to_usecs(160 * HZ / 1000), false,
				&ax_local->ax_spi, P0_PSR);
	if (ret)
		return ret;

	temp = AX_READ(&ax_local->ax_spi, P4_SPICR);
	if (ax_local->priv_flags & AX_CAP_COMP) {
		AX_WRITE(&ax_local->ax_spi,
			 (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR);
		ax_local->ax_spi.comp = 1;
	} else {
		AX_WRITE(&ax_local->ax_spi,
			 (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR);
		ax_local->ax_spi.comp = 0;
	}

	return 0;
}

static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local)
{
	int ret;

	lockdep_assert_held(&ax_local->spi_lock);

	AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR);

	ret = read_poll_timeout(AX_READ, ret,
				(ret & PSR_DEV_READY),
				0, jiffies_to_usecs(2 * HZ / 1000), false,
				&ax_local->ax_spi, P0_PSR);
	if (ret) {
		dev_err(&ax_local->spi->dev,
			"timeout waiting for reload eeprom\n");
		return ret;
	}

	return 0;
}

static void ax88796c_set_hw_multicast(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	int mc_count = netdev_mc_count(ndev);
	u16 rx_ctl = RXCR_AB;

	lockdep_assert_held(&ax_local->spi_lock);

	memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE);

	if (ndev->flags & IFF_PROMISC) {
		rx_ctl |= RXCR_PRO;

	} else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) {
		rx_ctl |= RXCR_AMALL;

	} else if (mc_count == 0) {
		/* just broadcast and directed */
	} else {
		u32 crc_bits;
		int i;
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			crc_bits = ether_crc(ETH_ALEN, ha->addr);
			ax_local->multi_filter[crc_bits >> 29] |=
						(1 << ((crc_bits >> 26) & 7));
		}

		for (i = 0; i < 4; i++) {
			AX_WRITE(&ax_local->ax_spi,
				 ((ax_local->multi_filter[i * 2 + 1] << 8) |
				  ax_local->multi_filter[i * 2]), P3_MFAR(i));
		}
	}

	AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR);
}

static void ax88796c_set_mac_addr(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);

	lockdep_assert_held(&ax_local->spi_lock);

	AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) |
			(u16)ndev->dev_addr[5]), P3_MACASR0);
	AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) |
			(u16)ndev->dev_addr[3]), P3_MACASR1);
	AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) |
			(u16)ndev->dev_addr[1]), P3_MACASR2);
}

static void ax88796c_load_mac_addr(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	u8 addr[ETH_ALEN];
	u16 temp;

	lockdep_assert_held(&ax_local->spi_lock);

	/* Try the device tree first */
	if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
	    is_valid_ether_addr(ndev->dev_addr)) {
		if (netif_msg_probe(ax_local))
			dev_info(&ax_local->spi->dev,
				 "MAC address read from device tree\n");
		return;
	}

	/* Read the MAC address from AX88796C */
	temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
	addr[5] = (u8)temp;
	addr[4] = (u8)(temp >> 8);

	temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
	addr[3] = (u8)temp;
	addr[2] = (u8)(temp >> 8);

	temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
	addr[1] = (u8)temp;
	addr[0] = (u8)(temp >> 8);

	if (is_valid_ether_addr(addr)) {
		eth_hw_addr_set(ndev, addr);
		if (netif_msg_probe(ax_local))
			dev_info(&ax_local->spi->dev,
				 "MAC address read from ASIX chip\n");
		return;
	}

	/* Use random address if none found */
	if (netif_msg_probe(ax_local))
		dev_info(&ax_local->spi->dev, "Use random MAC address\n");
	eth_hw_addr_random(ndev);
}

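/* Build the three on-wire TX headers (SOP, segment and EOP) for one
 * packet. Length and sequence fields are stored together with their
 * bit-inverted copies, presumably so the chip can sanity-check the
 * header, and all fields are converted to big endian in place.
 */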
static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed)
{
	u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR);

	/* Prepare SOP header */
	info->sop.flags_len = info->pkt_len |
		((ip_summed == CHECKSUM_NONE) ||
		 (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0);

	info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM)
				| pkt_len_bar;
	cpu_to_be16s(&info->sop.flags_len);
	cpu_to_be16s(&info->sop.seq_lenbar);

	/* Prepare Segment header */
	info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS
						| info->pkt_len;

	info->seg.eo_so_seglenbar = pkt_len_bar;

	cpu_to_be16s(&info->seg.flags_seqnum_seglen);
	cpu_to_be16s(&info->seg.eo_so_seglenbar);

	/* Prepare EOP header */
	info->eop.seq_len = ((info->seq_num << 11) &
			     TX_HDR_EOP_SEQNUM) | info->pkt_len;
	info->eop.seqbar_lenbar = ((~info->seq_num << 11) &
				   TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar;

	cpu_to_be16s(&info->eop.seq_len);
	cpu_to_be16s(&info->eop.seqbar_lenbar);
}

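/* Check whether the TX buffer has enough free pages for a frame needing
 * @need_pages pages. If not, ask the chip to signal a "free pages"
 * interrupt once that many pages are available and return -ENOMEM so
 * the caller leaves the packet on the wait queue for now.
 */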
static int
ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages)
{
	u8 free_pages;
	u16 tmp;

	lockdep_assert_held(&ax_local->spi_lock);

	free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK;
	if (free_pages < need_pages) {
		/* schedule free page interrupt */
		tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR)
				& TFBFCR_SCHE_FREE_PAGE;
		AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET |
				TFBFCR_SET_FREE_PAGE(need_pages),
				P0_TFBFCR);
		return -ENOMEM;
	}

	return 0;
}

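/* Peek at the first packet on @q and convert it to the chip's on-wire
 * TX layout: SPI TXQ command, SOP/segment headers, the frame padded to
 * a 32-bit boundary, and a trailing EOP header. The skb is reallocated
 * if it is cloned or lacks head/tail room, and is unlinked from the
 * queue only after the TX buffer reports enough free pages (128-byte
 * pages, judging by the "+ 127) >> 7" rounding). Returns NULL if the
 * queue is empty, there is no room in the chip yet, or the skb cannot
 * be expanded.
 */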
static struct sk_buff *
ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	u8 spi_len = ax_local->ax_spi.comp ? 1 : 4;
	struct sk_buff *skb;
	struct tx_pkt_info info;
	struct skb_data *entry;
	u16 pkt_len;
	u8 padlen, seq_num;
	u8 need_pages;
	int headroom;
	int tailroom;

	if (skb_queue_empty(q))
		return NULL;

	skb = skb_peek(q);
	pkt_len = skb->len;
	need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7;
	if (ax88796c_check_free_pages(ax_local, need_pages) != 0)
		return NULL;

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);
	padlen = round_up(pkt_len, 4) - pkt_len;
	seq_num = ++ax_local->seq_num & 0x1F;

	info.pkt_len = pkt_len;

	if (skb_cloned(skb) ||
	    (headroom < (TX_OVERHEAD + spi_len)) ||
	    (tailroom < (padlen + TX_EOP_SIZE))) {
		size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0);
		size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0);

		if (pskb_expand_head(skb, h, t, GFP_KERNEL))
			return NULL;
	}

	info.seq_num = seq_num;
	ax88796c_proc_tx_hdr(&info, skb->ip_summed);

	/* SOP and SEG header */
	memcpy(skb_push(skb, TX_OVERHEAD), &info.tx_overhead, TX_OVERHEAD);

	/* Write SPI TXQ header */
	memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);

	/* Make 32-bit alignment */
	skb_put(skb, padlen);

	/* EOP header */
	skb_put_data(skb, &info.eop, TX_EOP_SIZE);

	skb_unlink(skb, q);

	entry = (struct skb_data *)skb->cb;
	memset(entry, 0, sizeof(*entry));
	entry->len = pkt_len;

	if (netif_msg_pktdata(ax_local)) {
		char pfx[IFNAMSIZ + 7];

		snprintf(pfx, sizeof(pfx), "%s:     ", ndev->name);

		netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n",
			    pkt_len, skb->len, seq_num);

		netdev_info(ndev, "  SPI Header:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, 4, 0);

		netdev_info(ndev, "  TX SOP:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data + 4, TX_OVERHEAD, 0);

		netdev_info(ndev, "  TX packet:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data + 4 + TX_OVERHEAD,
			       skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0);

		netdev_info(ndev, "  TX EOP:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data + skb->len - 4, 4, 0);
	}

	return skb;
}

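/* Transmit one packet from the TX wait queue through the chip's TX
 * bridge. On a TX FIFO error the error interrupt is acked, the bridge
 * is reinitialized and the sequence number reset; otherwise the per-CPU
 * packet and byte counters are updated. Returns 1 if a packet was
 * consumed, 0 if nothing could be sent and the caller should stop.
 */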
static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
{
	struct ax88796c_pcpu_stats *stats;
	struct sk_buff *tx_skb;
	struct skb_data *entry;
	unsigned long flags;

	lockdep_assert_held(&ax_local->spi_lock);

	stats = this_cpu_ptr(ax_local->stats);
	tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q);

	if (!tx_skb) {
		this_cpu_inc(ax_local->stats->tx_dropped);
		return 0;
	}
	entry = (struct skb_data *)tx_skb->cb;

	AX_WRITE(&ax_local->ax_spi,
		 (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR);

	axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len);

	if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) ||
	    ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) {
		/* Ack tx error int */
		AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR);

		this_cpu_inc(ax_local->stats->tx_dropped);

		if (net_ratelimit())
			netif_err(ax_local, tx_err, ax_local->ndev,
				  "TX FIFO error, re-initialize the TX bridge\n");

		/* Reinitial tx bridge */
		AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT |
			AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR);
		ax_local->seq_num = 0;
	} else {
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, entry->len);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}

	entry->state = tx_done;
	dev_kfree_skb(tx_skb);

	return 1;
}

static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);

	skb_queue_tail(&ax_local->tx_wait_q, skb);
	if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER)
		netif_stop_queue(ndev);

	set_bit(EVENT_TX, &ax_local->flags);
	schedule_work(&ax_local->ax_work);

	return NETDEV_TX_OK;
}

static void
ax88796c_skb_return(struct ax88796c_device *ax_local,
		    struct sk_buff *skb, struct rx_header *rxhdr)
{
	struct net_device *ndev = ax_local->ndev;
	struct ax88796c_pcpu_stats *stats;
	unsigned long flags;
	int status;

	stats = this_cpu_ptr(ax_local->stats);

	do {
		if (!(ndev->features & NETIF_F_RXCSUM))
			break;

		/* checksum error bit is set */
		if ((rxhdr->flags & RX_HDR3_L3_ERR) ||
		    (rxhdr->flags & RX_HDR3_L4_ERR))
			break;

		/* Other types may be indicated by more than one bit. */
		if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) ||
		    (rxhdr->flags & RX_HDR3_L4_TYPE_UDP))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	} while (0);

	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, skb->len);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);

	skb->dev = ndev;
	skb->protocol = eth_type_trans(skb, ax_local->ndev);

	netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
		   skb->len + sizeof(struct ethhdr), skb->protocol);

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS && net_ratelimit())
		netif_info(ax_local, rx_err, ndev,
			   "netif_rx status %d\n", status);
}

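/* Validate and strip the chip's RX header: the packet length must match
 * its bit-inverted copy and the MII/CRC error flags must be clear,
 * otherwise the frame is counted as an error and dropped. On success
 * the header is pulled, the skb is trimmed to the real packet length
 * and handed to ax88796c_skb_return().
 */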
static void
ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb)
{
	struct rx_header *rxhdr = (struct rx_header *)rx_skb->data;
	struct net_device *ndev = ax_local->ndev;
	u16 len;

	be16_to_cpus(&rxhdr->flags_len);
	be16_to_cpus(&rxhdr->seq_lenbar);
	be16_to_cpus(&rxhdr->flags);

	if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) !=
			 (~rxhdr->seq_lenbar & 0x7FF)) {
		netif_err(ax_local, rx_err, ndev, "Header error\n");

		this_cpu_inc(ax_local->stats->rx_frame_errors);
		kfree_skb(rx_skb);
		return;
	}

	if ((rxhdr->flags_len & RX_HDR1_MII_ERR) ||
	    (rxhdr->flags_len & RX_HDR1_CRC_ERR)) {
		netif_err(ax_local, rx_err, ndev, "CRC or MII error\n");

		this_cpu_inc(ax_local->stats->rx_crc_errors);
		kfree_skb(rx_skb);
		return;
	}

	len = rxhdr->flags_len & RX_HDR1_PKT_LEN;
	if (netif_msg_pktdata(ax_local)) {
		char pfx[IFNAMSIZ + 7];

		snprintf(pfx, sizeof(pfx), "%s:     ", ndev->name);
		netdev_info(ndev, "RX data, total len %d, packet len %d\n",
			    rx_skb->len, len);

		netdev_info(ndev, "  Dump RX packet header:");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       rx_skb->data, sizeof(*rxhdr), 0);

		netdev_info(ndev, "  Dump RX packet:");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       rx_skb->data + sizeof(*rxhdr), len, 0);
	}

	skb_pull(rx_skb, sizeof(*rxhdr));
	pskb_trim(rx_skb, len);

	ax88796c_skb_return(ax_local, rx_skb, rxhdr);
}

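/* Fetch one packet from the RX bridge: latch the packet and word
 * counters, read the next packet's length from P0_RCPHR, allocate an
 * skb rounded up to whole 16-bit words and read the data over SPI. If
 * the RX bridge is not idle afterwards it is reinitialized. Returns 1
 * if a packet was read, 0 if nothing was pending or allocation failed.
 */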
static int ax88796c_receive(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	struct skb_data *entry;
	u16 w_count, pkt_len;
	struct sk_buff *skb;
	u8 pkt_cnt;

	lockdep_assert_held(&ax_local->spi_lock);

	/* check rx packet and total word count */
	AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR)
		  | RTWCR_RX_LATCH, P0_RTWCR);

	pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK;
	if (!pkt_cnt)
		return 0;

	pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF;

	w_count = round_up(pkt_len + 6, 4) >> 1;

	skb = netdev_alloc_skb(ndev, w_count * 2);
	if (!skb) {
		AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1);
		this_cpu_inc(ax_local->stats->rx_dropped);
		return 0;
	}
	entry = (struct skb_data *)skb->cb;

	AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1);

	axspi_read_rxq(&ax_local->ax_spi,
		       skb_put(skb, w_count * 2), skb->len);

	/* Check if rx bridge is idle */
	if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) {
		if (net_ratelimit())
			netif_err(ax_local, rx_err, ndev,
				  "Rx Bridge is not idle\n");
		AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2);

		entry->state = rx_err;
	} else {
		entry->state = rx_done;
	}

	AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR);

	ax88796c_rx_fixup(ax_local, skb);

	return 1;
}

static int ax88796c_process_isr(struct ax88796c_device *ax_local)
{
	struct net_device *ndev = ax_local->ndev;
	int todo = 0;
	u16 isr;

	lockdep_assert_held(&ax_local->spi_lock);

	isr = AX_READ(&ax_local->ax_spi, P0_ISR);
	AX_WRITE(&ax_local->ax_spi, isr, P0_ISR);

	netif_dbg(ax_local, intr, ndev, "  ISR 0x%04x\n", isr);

	if (isr & ISR_TXERR) {
		netif_dbg(ax_local, intr, ndev, "  TXERR interrupt\n");
		AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR);
		ax_local->seq_num = 0x1f;
	}

	if (isr & ISR_TXPAGES) {
		netif_dbg(ax_local, intr, ndev, "  TXPAGES interrupt\n");
		set_bit(EVENT_TX, &ax_local->flags);
	}

	if (isr & ISR_LINK) {
		netif_dbg(ax_local, intr, ndev, "  Link change interrupt\n");
		phy_mac_interrupt(ax_local->ndev->phydev);
	}

	if (isr & ISR_RXPKT) {
		netif_dbg(ax_local, intr, ndev, "  RX interrupt\n");
		todo = ax88796c_receive(ax_local->ndev);
	}

	return todo;
}

static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance)
{
	struct ax88796c_device *ax_local;
	struct net_device *ndev;

	ndev = dev_instance;
	if (!ndev) {
		pr_err("irq %d for unknown device.\n", irq);
		return IRQ_RETVAL(0);
	}
	ax_local = to_ax88796c_device(ndev);

	disable_irq_nosync(irq);

	netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n");

	set_bit(EVENT_INTR, &ax_local->flags);
	schedule_work(&ax_local->ax_work);

	return IRQ_HANDLED;
}

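/* Deferred work that performs all SPI traffic outside hard IRQ context.
 * Under spi_lock it applies pending multicast filter updates, drains
 * interrupt events with the MAC interrupts masked (re-enabling the IRQ
 * line the handler disabled), and transmits queued packets, waking the
 * netif queue once it falls below the low-water mark.
 */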
static void ax88796c_work(struct work_struct *work)
{
	struct ax88796c_device *ax_local =
			container_of(work, struct ax88796c_device, ax_work);

	mutex_lock(&ax_local->spi_lock);

	if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) {
		ax88796c_set_hw_multicast(ax_local->ndev);
		clear_bit(EVENT_SET_MULTI, &ax_local->flags);
	}

	if (test_bit(EVENT_INTR, &ax_local->flags)) {
		AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);

		while (ax88796c_process_isr(ax_local))
			/* nothing */;

		clear_bit(EVENT_INTR, &ax_local->flags);

		AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);

		enable_irq(ax_local->ndev->irq);
	}

	if (test_bit(EVENT_TX, &ax_local->flags)) {
		while (skb_queue_len(&ax_local->tx_wait_q)) {
			if (!ax88796c_hard_xmit(ax_local))
				break;
		}

		clear_bit(EVENT_TX, &ax_local->flags);

		if (netif_queue_stopped(ax_local->ndev) &&
		    (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER))
			netif_wake_queue(ax_local->ndev);
	}

	mutex_unlock(&ax_local->spi_lock);
}

static void ax88796c_get_stats64(struct net_device *ndev,
				 struct rtnl_link_stats64 *stats)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	u32 rx_frame_errors = 0, rx_crc_errors = 0;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct ax88796c_pcpu_stats *s;
		u64 rx_packets, rx_bytes;
		u64 tx_packets, tx_bytes;

		s = per_cpu_ptr(ax_local->stats, cpu);

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes   = u64_stats_read(&s->rx_bytes);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes   = u64_stats_read(&s->tx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;

		rx_dropped      += s->rx_dropped;
		tx_dropped      += s->tx_dropped;
		rx_frame_errors += s->rx_frame_errors;
		rx_crc_errors   += s->rx_crc_errors;
	}

	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->rx_crc_errors = rx_crc_errors;
}

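/* Program P0_MACCR from the cached PHY state: RX enable when the link
 * is up, speed, duplex and flow control (taken from the negotiated
 * pause bits when AX_FC_ANEG is set, otherwise from the fixed
 * AX_FC_RX/AX_FC_TX settings), while preserving the register's other
 * bits.
 */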
static void ax88796c_set_mac(struct ax88796c_device *ax_local)
{
	u16 maccr;

	maccr = (ax_local->link) ? MACCR_RXEN : 0;

	switch (ax_local->speed) {
	case SPEED_100:
		maccr |= MACCR_SPEED_100;
		break;
	case SPEED_10:
	case SPEED_UNKNOWN:
		break;
	default:
		return;
	}

	switch (ax_local->duplex) {
	case DUPLEX_FULL:
		maccr |= MACCR_DUPLEX_FULL;
		break;
	case DUPLEX_HALF:
	case DUPLEX_UNKNOWN:
		break;
	default:
		return;
	}

	if (ax_local->flowctrl & AX_FC_ANEG &&
	    ax_local->phydev->autoneg) {
		maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0;
		maccr |= !ax_local->pause != !ax_local->asym_pause ?
			MACCR_TXFC_ENABLE : 0;
	} else {
		maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
		maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
	}

	mutex_lock(&ax_local->spi_lock);

	maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
		~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 |
		  MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
	AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);

	mutex_unlock(&ax_local->spi_lock);
}

static void ax88796c_handle_link_change(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool update = false;

	if (phydev->link && (ax_local->speed != phydev->speed ||
			     ax_local->duplex != phydev->duplex ||
			     ax_local->pause != phydev->pause ||
			     ax_local->asym_pause != phydev->asym_pause)) {
		ax_local->speed = phydev->speed;
		ax_local->duplex = phydev->duplex;
		ax_local->pause = phydev->pause;
		ax_local->asym_pause = phydev->asym_pause;
		update = true;
	}

	if (phydev->link != ax_local->link) {
		if (!phydev->link) {
			ax_local->speed = SPEED_UNKNOWN;
			ax_local->duplex = DUPLEX_UNKNOWN;
		}

		ax_local->link = phydev->link;
		update = true;
	}

	if (update)
		ax88796c_set_mac(ax_local);

	if (net_ratelimit())
		phy_print_status(ndev->phydev);
}

static void ax88796c_set_csums(struct ax88796c_device *ax_local)
{
	struct net_device *ndev = ax_local->ndev;

	lockdep_assert_held(&ax_local->spi_lock);

	if (ndev->features & NETIF_F_RXCSUM) {
		AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0);
		AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1);
	} else {
		AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0);
		AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1);
	}

	if (ndev->features & NETIF_F_HW_CSUM) {
		AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0);
		AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1);
	} else {
		AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0);
		AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1);
	}
}

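/* ndo_open: request the interrupt, soft-reset and reconfigure the chip
 * (MAC address, checksum offload, RX processing, LEDs, interrupt mask),
 * derive the flow-control mode from the PHY's advertised pause bits,
 * then start the PHY and the transmit queue.
 */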
static int
ax88796c_open(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	unsigned long irq_flag = 0;
	int fc = AX_FC_NONE;
	int ret;
	u16 t;

	ret = request_irq(ndev->irq, ax88796c_interrupt,
			  irq_flag, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n",
			   ndev->irq, ret);
		return ret;
	}

	mutex_lock(&ax_local->spi_lock);

	ret = ax88796c_soft_reset(ax_local);
	if (ret < 0) {
		free_irq(ndev->irq, ndev);
		mutex_unlock(&ax_local->spi_lock);
		return ret;
	}
	ax_local->seq_num = 0x1f;

	ax88796c_set_mac_addr(ndev);
	ax88796c_set_csums(ax_local);

	/* Disable stuffing packet */
	t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR);
	t &= ~RXBSPCR_STUF_ENABLE;
	AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR);

	/* Enable RX packet process */
	AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER);

	t = AX_READ(&ax_local->ax_spi, P0_FER);
	t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL;
	AX_WRITE(&ax_local->ax_spi, t, P0_FER);

	/* Setup LED mode */
	AX_WRITE(&ax_local->ax_spi,
		 (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN |
		 LCR_LED1_100MODE), P2_LCR0);
	AX_WRITE(&ax_local->ax_spi,
		 (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) |
		 LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1);

	/* Disable PHY auto-polling */
	AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR);

	/* Enable MAC interrupts */
	AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);

	mutex_unlock(&ax_local->spi_lock);

	/* Setup flow-control configuration */
	phy_support_asym_pause(ax_local->phydev);

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			      ax_local->phydev->advertising) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			      ax_local->phydev->advertising))
		fc |= AX_FC_ANEG;

	fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				ax_local->phydev->advertising) ? AX_FC_RX : 0;
	fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 ax_local->phydev->advertising) !=
	       linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 ax_local->phydev->advertising)) ? AX_FC_TX : 0;
	ax_local->flowctrl = fc;

	phy_start(ax_local->ndev->phydev);

	netif_start_queue(ndev);

	spi_message_init(&ax_local->ax_spi.rx_msg);

	return 0;
}

static int
ax88796c_close(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);

	phy_stop(ndev->phydev);

	/* We lock the mutex early not only to protect the device
	 * against concurrent access, but also avoid waking up the
	 * queue in ax88796c_work(). phy_stop() needs to be called
	 * before because it locks the mutex to access SPI.
	 */
	mutex_lock(&ax_local->spi_lock);

	netif_stop_queue(ndev);

	/* No more work can be scheduled now. Make any pending work,
	 * including one already waiting for the mutex to be unlocked,
	 * NOP.
	 */
	netif_dbg(ax_local, ifdown, ndev, "clearing bits\n");
	clear_bit(EVENT_SET_MULTI, &ax_local->flags);
	clear_bit(EVENT_INTR, &ax_local->flags);
	clear_bit(EVENT_TX, &ax_local->flags);

	/* Disable MAC interrupts */
	AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
	__skb_queue_purge(&ax_local->tx_wait_q);
	ax88796c_soft_reset(ax_local);

	mutex_unlock(&ax_local->spi_lock);

	cancel_work_sync(&ax_local->ax_work);

	free_irq(ndev->irq, ndev);

	return 0;
}

static int
ax88796c_set_features(struct net_device *ndev, netdev_features_t features)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	netdev_features_t changed = features ^ ndev->features;

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)))
		return 0;

	ndev->features = features;

	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM))
		ax88796c_set_csums(ax_local);

	return 0;
}

static const struct net_device_ops ax88796c_netdev_ops = {
	.ndo_open		= ax88796c_open,
	.ndo_stop		= ax88796c_close,
	.ndo_start_xmit		= ax88796c_start_xmit,
	.ndo_get_stats64	= ax88796c_get_stats64,
	.ndo_eth_ioctl		= ax88796c_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= ax88796c_set_features,
};

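/* Hardware reset via the "reset" GPIO: assert the line for about
 * 100 ms, release it and give the chip a further ~20 ms before the
 * first SPI access.
 */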
static int ax88796c_hard_reset(struct ax88796c_device *ax_local)
{
	struct device *dev = (struct device *)&ax_local->spi->dev;
	struct gpio_desc *reset_gpio;

	/* reset info */
	reset_gpio = gpiod_get(dev, "reset", 0);
	if (IS_ERR(reset_gpio)) {
		dev_err(dev, "Could not get 'reset' GPIO: %ld", PTR_ERR(reset_gpio));
		return PTR_ERR(reset_gpio);
	}

	/* set reset */
	gpiod_direction_output(reset_gpio, 1);
	msleep(100);
	gpiod_direction_output(reset_gpio, 0);
	gpiod_put(reset_gpio);
	msleep(20);

	return 0;
}

static int ax88796c_probe(struct spi_device *spi)
{
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct ax88796c_device *ax_local;
	struct net_device *ndev;
	u16 temp;
	int ret;

	ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &spi->dev);

	ax_local = to_ax88796c_device(ndev);

	dev_set_drvdata(&spi->dev, ax_local);
	ax_local->spi = spi;
	ax_local->ax_spi.spi = spi;

	ax_local->stats =
		devm_netdev_alloc_pcpu_stats(&spi->dev,
					     struct ax88796c_pcpu_stats);
	if (!ax_local->stats)
		return -ENOMEM;

	ax_local->ndev = ndev;
	ax_local->priv_flags |= comp ? AX_CAP_COMP : 0;
	ax_local->msg_enable = msg_enable;
	mutex_init(&ax_local->spi_lock);

	ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev);
	if (!ax_local->mdiobus)
		return -ENOMEM;

	ax_local->mdiobus->priv = ax_local;
	ax_local->mdiobus->read = ax88796c_mdio_read;
	ax_local->mdiobus->write = ax88796c_mdio_write;
	ax_local->mdiobus->name = "ax88796c-mdiobus";
	ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID);
	ax_local->mdiobus->parent = &spi->dev;

	snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
		 "ax88796c-%s.%u", dev_name(&spi->dev), spi_get_chipselect(spi, 0));

	ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
	if (ret < 0) {
		dev_err(&spi->dev, "Could not register MDIO bus\n");
		return ret;
	}

	if (netif_msg_probe(ax_local)) {
		dev_info(&spi->dev, "AX88796C-SPI Configuration:\n");
		dev_info(&spi->dev, "    Compression : %s\n",
			 ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF");
	}

	ndev->irq = spi->irq;
	ndev->netdev_ops = &ax88796c_netdev_ops;
	ndev->ethtool_ops = &ax88796c_ethtool_ops;
	ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	ndev->needed_headroom = TX_OVERHEAD;
	ndev->needed_tailroom = TX_EOP_SIZE;

	mutex_lock(&ax_local->spi_lock);

	/* ax88796c gpio reset */
	ax88796c_hard_reset(ax_local);

	/* Reset AX88796C */
	ret = ax88796c_soft_reset(ax_local);
	if (ret < 0) {
		ret = -ENODEV;
		mutex_unlock(&ax_local->spi_lock);
		goto err;
	}
	/* Check board revision */
	temp = AX_READ(&ax_local->ax_spi, P2_CRIR);
	if ((temp & 0xF) != 0x0) {
		dev_err(&spi->dev, "spi read failed: %d\n", temp);
		ret = -ENODEV;
		mutex_unlock(&ax_local->spi_lock);
		goto err;
	}

	/* Reload EEPROM */
	ax88796c_reload_eeprom(ax_local);

	ax88796c_load_mac_addr(ndev);

	if (netif_msg_probe(ax_local))
		dev_info(&spi->dev,
			 "irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
			 ndev->irq,
			 ndev->dev_addr[0], ndev->dev_addr[1],
			 ndev->dev_addr[2], ndev->dev_addr[3],
			 ndev->dev_addr[4], ndev->dev_addr[5]);

	/* Disable power saving */
	AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR)
				     & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR);

	mutex_unlock(&ax_local->spi_lock);

	INIT_WORK(&ax_local->ax_work, ax88796c_work);

	skb_queue_head_init(&ax_local->tx_wait_q);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
		 ax_local->mdiobus->id, AX88796C_PHY_ID);
	ax_local->phydev = phy_connect(ax_local->ndev, phy_id,
				       ax88796c_handle_link_change,
				       PHY_INTERFACE_MODE_MII);
	if (IS_ERR(ax_local->phydev)) {
		ret = PTR_ERR(ax_local->phydev);
		goto err;
	}
	ax_local->phydev->irq = PHY_POLL;

	ret = devm_register_netdev(&spi->dev, ndev);
	if (ret) {
		dev_err(&spi->dev, "failed to register a network device\n");
		goto err_phy_dis;
	}

	netif_info(ax_local, probe, ndev, "%s %s registered\n",
		   dev_driver_string(&spi->dev),
		   dev_name(&spi->dev));
	phy_attached_info(ax_local->phydev);

	return 0;

err_phy_dis:
	phy_disconnect(ax_local->phydev);
err:
	return ret;
}

static void ax88796c_remove(struct spi_device *spi)
{
	struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
	struct net_device *ndev = ax_local->ndev;

	phy_disconnect(ndev->phydev);

	netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
		   dev_driver_string(&spi->dev),
		   dev_name(&spi->dev));
}

#ifdef CONFIG_OF
static const struct of_device_id ax88796c_dt_ids[] = {
	{ .compatible = "asix,ax88796c" },
	{},
};
MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
#endif

static const struct spi_device_id asix_id[] = {
	{ "ax88796c", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, asix_id);

static struct spi_driver ax88796c_spi_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ax88796c_dt_ids),
	},
	.probe = ax88796c_probe,
	.remove = ax88796c_remove,
	.id_table = asix_id,
};

static __init int ax88796c_spi_init(void)
{
	int ret;

	bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
	ret = bitmap_parse(no_regs_list, 35,
			   ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
	if (ret) {
		bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
		pr_err("Invalid bitmap description, masking all registers\n");
	}

	return spi_register_driver(&ax88796c_spi_driver);
}

static __exit void ax88796c_spi_exit(void)
{
	spi_unregister_driver(&ax88796c_spi_driver);
}

module_init(ax88796c_spi_init);
module_exit(ax88796c_spi_exit);

MODULE_AUTHOR("Ɓukasz Stelmach <l.stelmach@samsung.com>");
MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver");
MODULE_LICENSE("GPL");