   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) ST-Ericsson AB 2010
   4 * Author:  Daniel Martensson
   5 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@lockless.no
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME fmt
   9
  10#include <linux/init.h>
  11#include <linux/module.h>
  12#include <linux/device.h>
  13#include <linux/netdevice.h>
  14#include <linux/string.h>
  15#include <linux/list.h>
  16#include <linux/interrupt.h>
  17#include <linux/delay.h>
  18#include <linux/sched.h>
  19#include <linux/if_arp.h>
  20#include <linux/timer.h>
  21#include <net/rtnetlink.h>
  22#include <linux/pkt_sched.h>
  23#include <net/caif/caif_layer.h>
  24#include <net/caif/caif_hsi.h>
  25
  26MODULE_LICENSE("GPL");
  27MODULE_AUTHOR("Daniel Martensson");
  28MODULE_DESCRIPTION("CAIF HSI driver");
  29
  30/* Returns the number of padding bytes for alignment. */
  31#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
  32				(((pow)-((x)&((pow)-1)))))
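    /*
     * Worked example (illustration only): with an alignment of 4,
     * PAD_POW2(5, 4) = 4 - (5 & 3) = 3 padding bytes are needed to reach
     * the next 4-byte boundary, while PAD_POW2(8, 4) = 0 because 8 is
     * already aligned.
     */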
  33
  34static const struct cfhsi_config  hsi_default_config = {
  35
  36	/* Inactivity timeout on HSI, in jiffies (default: 1 second) */
  37	.inactivity_timeout = HZ,
  38
  39	/* Aggregation timeout (ms) of zero means no aggregation is done */
  40	.aggregation_timeout = 1,
  41
  42	/*
  43	 * HSI link layer flow-control thresholds.
  44	 * Threshold values for the HSI packet queue. Flow-control will be
  45	 * asserted when the number of packets exceeds q_high_mark. It will
  46	 * not be de-asserted before the number of packets drops below
  47	 * q_low_mark.
  48	 * Warning: A high threshold value might increase throughput but it
  49	 * will at the same time prevent channel prioritization and increase
  50	 * the risk of flooding the modem. The high threshold should be above
  51	 * the low.
  52	 */
  53	.q_high_mark = 100,
  54	.q_low_mark = 50,
  55
  56	/*
  57	 * HSI padding options.
  58	 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
  59	 */
  60	.head_align = 4,
  61	.tail_align = 4,
  62};
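    /*
     * Note: these defaults apply per device and can be overridden from user
     * space through the __IFLA_CAIF_HSI_* netlink attributes parsed in
     * cfhsi_netlink_parms() (see caif_hsi_newlink() and caif_hsi_changelink()
     * below).
     */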
  63
  64#define ON 1
  65#define OFF 0
  66
  67static LIST_HEAD(cfhsi_list);
  68
  69static void cfhsi_inactivity_tout(struct timer_list *t)
  70{
  71	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
  72
  73	netdev_dbg(cfhsi->ndev, "%s.\n",
  74		__func__);
  75
  76	/* Schedule power down work queue. */
  77	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
  78		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
  79}
  80
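    /*
     * Account for the on-wire size of a packet (its payload plus the same
     * head and tail padding that cfhsi_tx_frm() will add): direction > 0
     * when the packet is queued in cfhsi_xmit(), direction < 0 when it is
     * dequeued for transmission or dropped.
     */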
  81static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
  82					   const struct sk_buff *skb,
  83					   int direction)
  84{
  85	struct caif_payload_info *info;
  86	int hpad, tpad, len;
  87
  88	info = (struct caif_payload_info *)&skb->cb;
  89	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
  90	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
  91	len = skb->len + hpad + tpad;
  92
  93	if (direction > 0)
  94		cfhsi->aggregation_len += len;
  95	else if (direction < 0)
  96		cfhsi->aggregation_len -= len;
  97}
  98
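    /*
     * Decide whether queued traffic should be sent right away: when
     * aggregation is disabled (timeout of zero), when any queue of higher
     * priority than CFHSI_PRIO_BEBK holds a packet, or when the best-effort
     * queue itself has reached CFHSI_MAX_PKTS. Otherwise transmission waits
     * for the aggregation timer.
     */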
  99static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
 100{
 101	int i;
 102
 103	if (cfhsi->cfg.aggregation_timeout == 0)
 104		return true;
 105
 106	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
 107		if (cfhsi->qhead[i].qlen)
 108			return true;
 109	}
 110
 111	/* TODO: Use aggregation_len instead */
 112	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
 113		return true;
 114
 115	return false;
 116}
 117
 118static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
 119{
 120	struct sk_buff *skb;
 121	int i;
 122
 123	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
 124		skb = skb_dequeue(&cfhsi->qhead[i]);
 125		if (skb)
 126			break;
 127	}
 128
 129	return skb;
 130}
 131
 132static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
 133{
 134	int i, len = 0;
 135	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
 136		len += skb_queue_len(&cfhsi->qhead[i]);
 137	return len;
 138}
 139
 140static void cfhsi_abort_tx(struct cfhsi *cfhsi)
 141{
 142	struct sk_buff *skb;
 143
 144	for (;;) {
 145		spin_lock_bh(&cfhsi->lock);
 146		skb = cfhsi_dequeue(cfhsi);
 147		if (!skb)
 148			break;
 149
 150		cfhsi->ndev->stats.tx_errors++;
 151		cfhsi->ndev->stats.tx_dropped++;
 152		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 153		spin_unlock_bh(&cfhsi->lock);
 154		kfree_skb(skb);
 155	}
 156	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 157	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 158		mod_timer(&cfhsi->inactivity_timer,
 159			jiffies + cfhsi->cfg.inactivity_timeout);
 160	spin_unlock_bh(&cfhsi->lock);
 161}
 162
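    /*
     * Drain stale data from the HSI FIFO at interface bring-up: read up to
     * sizeof(buffer) bytes at a time and wait (at most 5 seconds per read)
     * for cfhsi_rx_done_cb() to clear CFHSI_FLUSH_FIFO, until the reported
     * FIFO occupancy drops to zero.
     */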
 163static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 164{
 165	char buffer[32]; /* Any reasonable value */
 166	size_t fifo_occupancy;
 167	int ret;
 168
 169	netdev_dbg(cfhsi->ndev, "%s.\n",
 170		__func__);
 171
 172	do {
 173		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
 174				&fifo_occupancy);
 175		if (ret) {
 176			netdev_warn(cfhsi->ndev,
 177				"%s: can't get FIFO occupancy: %d.\n",
 178				__func__, ret);
 179			break;
 180		} else if (!fifo_occupancy)
 181			/* No more data, exiting normally */
 182			break;
 183
 184		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
 185		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 186		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
 187				cfhsi->ops);
 188		if (ret) {
 189			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 190			netdev_warn(cfhsi->ndev,
 191				"%s: can't read data: %d.\n",
 192				__func__, ret);
 193			break;
 194		}
 195
 196		ret = 5 * HZ;
 197		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
 198			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
 199
 200		if (ret < 0) {
 201			netdev_warn(cfhsi->ndev,
 202				"%s: can't wait for flush complete: %d.\n",
 203				__func__, ret);
 204			break;
 205		} else if (!ret) {
 206			ret = -ETIMEDOUT;
 207			netdev_warn(cfhsi->ndev,
 208				"%s: timeout waiting for flush complete.\n",
 209				__func__);
 210			break;
 211		}
 212	} while (1);
 213
 214	return ret;
 215}
 216
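    /*
     * Build one HSI frame in the TX buffer. The frame starts with a
     * cfhsi_desc: a header byte (carrying CFHSI_PIGGY_DESC), the offset of
     * an optional embedded CAIF frame, the cffrm_len[] array of frame
     * lengths and the emb_frm[] area. A CAIF frame small enough to fit in
     * emb_frm[] (including padding) is embedded there; up to CFHSI_MAX_PKTS
     * further frames follow the descriptor, each stored as a pad-count
     * byte, head padding, the CAIF frame and tail padding. Returns
     * CFHSI_DESC_SZ plus the total payload length.
     */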
 217static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 218{
 219	int nfrms = 0;
 220	int pld_len = 0;
 221	struct sk_buff *skb;
 222	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 223
 224	skb = cfhsi_dequeue(cfhsi);
 225	if (!skb)
 226		return 0;
 227
 228	/* Clear offset. */
 229	desc->offset = 0;
 230
 231	/* Check if we can embed a CAIF frame. */
 232	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
 233		struct caif_payload_info *info;
 234		int hpad;
 235		int tpad;
 236
 237		/* Calculate needed head alignment and tail alignment. */
 238		info = (struct caif_payload_info *)&skb->cb;
 239
 240		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
 241		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 242
 243		/* Check if frame still fits with added alignment. */
 244		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
 245			u8 *pemb = desc->emb_frm;
 246			desc->offset = CFHSI_DESC_SHORT_SZ;
 247			*pemb = (u8)(hpad - 1);
 248			pemb += hpad;
 249
 250			/* Update network statistics. */
 251			spin_lock_bh(&cfhsi->lock);
 252			cfhsi->ndev->stats.tx_packets++;
 253			cfhsi->ndev->stats.tx_bytes += skb->len;
 254			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 255			spin_unlock_bh(&cfhsi->lock);
 256
 257			/* Copy in embedded CAIF frame. */
 258			skb_copy_bits(skb, 0, pemb, skb->len);
 259
 260			/* Consume the SKB */
 261			consume_skb(skb);
 262			skb = NULL;
 263		}
 264	}
 265
 266	/* Create payload CAIF frames. */
 267	while (nfrms < CFHSI_MAX_PKTS) {
 268		struct caif_payload_info *info;
 269		int hpad;
 270		int tpad;
 271
 272		if (!skb)
 273			skb = cfhsi_dequeue(cfhsi);
 274
 275		if (!skb)
 276			break;
 277
 278		/* Calculate needed head alignment and tail alignment. */
 279		info = (struct caif_payload_info *)&skb->cb;
 280
 281		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
 282		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 283
 284		/* Fill in CAIF frame length in descriptor. */
 285		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
 286
 287		/* Fill head padding information. */
 288		*pfrm = (u8)(hpad - 1);
 289		pfrm += hpad;
 290
 291		/* Update network statistics. */
 292		spin_lock_bh(&cfhsi->lock);
 293		cfhsi->ndev->stats.tx_packets++;
 294		cfhsi->ndev->stats.tx_bytes += skb->len;
 295		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 296		spin_unlock_bh(&cfhsi->lock);
 297
 298		/* Copy in CAIF frame. */
 299		skb_copy_bits(skb, 0, pfrm, skb->len);
 300
 301		/* Update payload length. */
 302		pld_len += desc->cffrm_len[nfrms];
 303
 304		/* Update frame pointer. */
 305		pfrm += skb->len + tpad;
 306
 307		/* Consume the SKB */
 308		consume_skb(skb);
 309		skb = NULL;
 310
 311		/* Update number of frames. */
 312		nfrms++;
 313	}
 314
 315	/* Unused length fields should be zero-filled (according to SPEC). */
 316	while (nfrms < CFHSI_MAX_PKTS) {
 317		desc->cffrm_len[nfrms] = 0x0000;
 318		nfrms++;
 319	}
 320
 321	/* Check if we can piggy-back another descriptor. */
 322	if (cfhsi_can_send_aggregate(cfhsi))
 323		desc->header |= CFHSI_PIGGY_DESC;
 324	else
 325		desc->header &= ~CFHSI_PIGGY_DESC;
 326
 327	return CFHSI_DESC_SZ + pld_len;
 328}
 329
 330static void cfhsi_start_tx(struct cfhsi *cfhsi)
 331{
 332	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 333	int len, res;
 334
 335	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 336
 337	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 338		return;
 339
 340	do {
 341		/* Create HSI frame. */
 342		len = cfhsi_tx_frm(desc, cfhsi);
 343		if (!len) {
 344			spin_lock_bh(&cfhsi->lock);
 345			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
 346				spin_unlock_bh(&cfhsi->lock);
 347				res = -EAGAIN;
 348				continue;
 349			}
 350			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 351			/* Start inactivity timer. */
 352			mod_timer(&cfhsi->inactivity_timer,
 353				jiffies + cfhsi->cfg.inactivity_timeout);
 354			spin_unlock_bh(&cfhsi->lock);
 355			break;
 356		}
 357
 358		/* Set up new transfer. */
 359		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
 360		if (WARN_ON(res < 0))
 361			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 362				__func__, res);
 363	} while (res < 0);
 364}
 365
 366static void cfhsi_tx_done(struct cfhsi *cfhsi)
 367{
 368	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 369
 370	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 371		return;
 372
 373	/*
 374	 * Send flow on if flow off has been previously signalled
 375	 * and number of packets is below low water mark.
 376	 */
 377	spin_lock_bh(&cfhsi->lock);
 378	if (cfhsi->flow_off_sent &&
 379			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
 380			cfhsi->cfdev.flowctrl) {
 381
 382		cfhsi->flow_off_sent = 0;
 383		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
 384	}
 385
 386	if (cfhsi_can_send_aggregate(cfhsi)) {
 387		spin_unlock_bh(&cfhsi->lock);
 388		cfhsi_start_tx(cfhsi);
 389	} else {
 390		mod_timer(&cfhsi->aggregation_timer,
 391			jiffies + cfhsi->cfg.aggregation_timeout);
 392		spin_unlock_bh(&cfhsi->lock);
 393	}
 394
 395	return;
 396}
 397
 398static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
 399{
 400	struct cfhsi *cfhsi;
 401
 402	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 403	netdev_dbg(cfhsi->ndev, "%s.\n",
 404		__func__);
 405
 406	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 407		return;
 408	cfhsi_tx_done(cfhsi);
 409}
 410
 411static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 412{
 413	int xfer_sz = 0;
 414	int nfrms = 0;
 415	u16 *plen = NULL;
 416	u8 *pfrm = NULL;
 417
 418	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 419			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 420		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
 421			__func__);
 422		return -EPROTO;
 423	}
 424
 425	/* Check for embedded CAIF frame. */
 426	if (desc->offset) {
 427		struct sk_buff *skb;
 428		int len = 0;
 429		pfrm = ((u8 *)desc) + desc->offset;
 430
 431		/* Remove offset padding. */
 432		pfrm += *pfrm + 1;
 433
 434		/* Read length of CAIF frame (little endian). */
 435		len = *pfrm;
 436		len |= ((*(pfrm+1)) << 8) & 0xFF00;
 437		len += 2;	/* Add FCS fields. */
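    		/*
    		 * Example: if the first two bytes of the CAIF frame are
    		 * 0x05 0x01, len = 0x0105 = 261, plus 2 FCS bytes = 263.
    		 */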
 438
 439		/* Sanity check length of CAIF frame. */
 440		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
 441			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
 442				__func__);
 443			return -EPROTO;
 444		}
 445
 446		/* Allocate SKB (OK even in IRQ context). */
 447		skb = alloc_skb(len + 1, GFP_ATOMIC);
 448		if (!skb) {
 449			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
 450				__func__);
 451			return -ENOMEM;
 452		}
 453		caif_assert(skb != NULL);
 454
 455		skb_put_data(skb, pfrm, len);
 456
 457		skb->protocol = htons(ETH_P_CAIF);
 458		skb_reset_mac_header(skb);
 459		skb->dev = cfhsi->ndev;
 460
 461		/*
 462		 * We are in a callback handler and
 463		 * unfortunately we don't know what context we're
 464		 * running in.
 465		 */
 466		if (in_interrupt())
 467			netif_rx(skb);
 468		else
 469			netif_rx_ni(skb);
 470
 471		/* Update network statistics. */
 472		cfhsi->ndev->stats.rx_packets++;
 473		cfhsi->ndev->stats.rx_bytes += len;
 474	}
 475
 476	/* Calculate transfer length. */
 477	plen = desc->cffrm_len;
 478	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 479		xfer_sz += *plen;
 480		plen++;
 481		nfrms++;
 482	}
 483
 484	/* Check for piggy-backed descriptor. */
 485	if (desc->header & CFHSI_PIGGY_DESC)
 486		xfer_sz += CFHSI_DESC_SZ;
 487
 488	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
 489		netdev_err(cfhsi->ndev,
 490				"%s: Invalid payload len: %d, ignored.\n",
 491			__func__, xfer_sz);
 492		return -EPROTO;
 493	}
 494	return xfer_sz;
 495}
 496
 497static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
 498{
 499	int xfer_sz = 0;
 500	int nfrms = 0;
 501	u16 *plen;
 502
 503	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 504			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 505
 506		pr_err("Invalid descriptor. %x %x\n", desc->header,
 507				desc->offset);
 508		return -EPROTO;
 509	}
 510
 511	/* Calculate transfer length. */
 512	plen = desc->cffrm_len;
 513	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 514		xfer_sz += *plen;
 515		plen++;
 516		nfrms++;
 517	}
 518
 519	if (xfer_sz % 4) {
 520		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
 521		return -EPROTO;
 522	}
 523	return xfer_sz;
 524}
 525
 526static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 527{
 528	int rx_sz = 0;
 529	int nfrms = 0;
 530	u16 *plen = NULL;
 531	u8 *pfrm = NULL;
 532
 533	/* Sanity check header and offset. */
 534	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
 535			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
 536		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
 537			__func__);
 538		return -EPROTO;
 539	}
 540
 541	/* Set frame pointer to start of payload. */
 542	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 543	plen = desc->cffrm_len;
 544
 545	/* Skip already processed frames. */
 546	while (nfrms < cfhsi->rx_state.nfrms) {
 547		pfrm += *plen;
 548		rx_sz += *plen;
 549		plen++;
 550		nfrms++;
 551	}
 552
 553	/* Parse payload. */
 554	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 555		struct sk_buff *skb;
 556		u8 *pcffrm = NULL;
 557		int len;
 558
 559		/* CAIF frame starts after head padding. */
 560		pcffrm = pfrm + *pfrm + 1;
 561
 562		/* Read length of CAIF frame (little endian). */
 563		len = *pcffrm;
 564		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
 565		len += 2;	/* Add FCS fields. */
 566
 567		/* Sanity check length of CAIF frames. */
 568		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
 569			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
 570				__func__);
 571			return -EPROTO;
 572		}
 573
 574		/* Allocate SKB (OK even in IRQ context). */
 575		skb = alloc_skb(len + 1, GFP_ATOMIC);
 576		if (!skb) {
 577			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
 578				__func__);
 579			cfhsi->rx_state.nfrms = nfrms;
 580			return -ENOMEM;
 581		}
 582		caif_assert(skb != NULL);
 583
 584		skb_put_data(skb, pcffrm, len);
 585
 586		skb->protocol = htons(ETH_P_CAIF);
 587		skb_reset_mac_header(skb);
 588		skb->dev = cfhsi->ndev;
 589
 590		/*
 591		 * We're called in callback from HSI
 592		 * and don't know the context we're running in.
 593		 */
 594		if (in_interrupt())
 595			netif_rx(skb);
 596		else
 597			netif_rx_ni(skb);
 598
 599		/* Update network statistics. */
 600		cfhsi->ndev->stats.rx_packets++;
 601		cfhsi->ndev->stats.rx_bytes += len;
 602
 603		pfrm += *plen;
 604		rx_sz += *plen;
 605		plen++;
 606		nfrms++;
 607	}
 608
 609	return rx_sz;
 610}
 611
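    /*
     * Called from the RX-done callback (and from the RX slow-path timer).
     * The size of the next transfer is derived from the descriptor that just
     * arrived and that transfer is started first (into rx_flip_buf when a
     * fresh descriptor is expected, or appended after the descriptor in the
     * same buffer when it is that descriptor's payload); only then is the
     * buffer that just completed parsed. rx_buf and rx_flip_buf are swapped
     * when needed so reception and parsing do not share memory.
     */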
 612static void cfhsi_rx_done(struct cfhsi *cfhsi)
 613{
 614	int res;
 615	int desc_pld_len = 0, rx_len, rx_state;
 616	struct cfhsi_desc *desc = NULL;
 617	u8 *rx_ptr, *rx_buf;
 618	struct cfhsi_desc *piggy_desc = NULL;
 619
 620	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
 621
 622	netdev_dbg(cfhsi->ndev, "%s\n", __func__);
 623
 624	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 625		return;
 626
 627	/* Update inactivity timer if pending. */
 628	spin_lock_bh(&cfhsi->lock);
 629	mod_timer_pending(&cfhsi->inactivity_timer,
 630			jiffies + cfhsi->cfg.inactivity_timeout);
 631	spin_unlock_bh(&cfhsi->lock);
 632
 633	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
 634		desc_pld_len = cfhsi_rx_desc_len(desc);
 635
 636		if (desc_pld_len < 0)
 637			goto out_of_sync;
 638
 639		rx_buf = cfhsi->rx_buf;
 640		rx_len = desc_pld_len;
 641		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
 642			rx_len += CFHSI_DESC_SZ;
 643		if (desc_pld_len == 0)
 644			rx_buf = cfhsi->rx_flip_buf;
 645	} else {
 646		rx_buf = cfhsi->rx_flip_buf;
 647
 648		rx_len = CFHSI_DESC_SZ;
 649		if (cfhsi->rx_state.pld_len > 0 &&
 650				(desc->header & CFHSI_PIGGY_DESC)) {
 651
 652			piggy_desc = (struct cfhsi_desc *)
 653				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
 654						cfhsi->rx_state.pld_len);
 655
 656			cfhsi->rx_state.piggy_desc = true;
 657
 658			/* Extract payload len from piggy-backed descriptor. */
 659			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
 660			if (desc_pld_len < 0)
 661				goto out_of_sync;
 662
 663			if (desc_pld_len > 0) {
 664				rx_len = desc_pld_len;
 665				if (piggy_desc->header & CFHSI_PIGGY_DESC)
 666					rx_len += CFHSI_DESC_SZ;
 667			}
 668
 669			/*
 670			 * Copy needed information from the piggy-backed
 671			 * descriptor to the descriptor in the start.
 672			 */
 673			memcpy(rx_buf, (u8 *)piggy_desc,
 674					CFHSI_DESC_SHORT_SZ);
 675		}
 676	}
 677
 678	if (desc_pld_len) {
 679		rx_state = CFHSI_RX_STATE_PAYLOAD;
 680		rx_ptr = rx_buf + CFHSI_DESC_SZ;
 681	} else {
 682		rx_state = CFHSI_RX_STATE_DESC;
 683		rx_ptr = rx_buf;
 684		rx_len = CFHSI_DESC_SZ;
 685	}
 686
 687	/* Initiate next read */
 688	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
 689		/* Set up new transfer. */
 690		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
 691				__func__);
 692
 693		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
 694				cfhsi->ops);
 695		if (WARN_ON(res < 0)) {
 696			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
 697				__func__, res);
 698			cfhsi->ndev->stats.rx_errors++;
 699			cfhsi->ndev->stats.rx_dropped++;
 700		}
 701	}
 702
 703	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
 704		/* Extract payload from descriptor */
 705		if (cfhsi_rx_desc(desc, cfhsi) < 0)
 706			goto out_of_sync;
 707	} else {
 708		/* Extract payload */
 709		if (cfhsi_rx_pld(desc, cfhsi) < 0)
 710			goto out_of_sync;
 711		if (piggy_desc) {
 712			/* Extract any payload in piggyback descriptor. */
 713			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
 714				goto out_of_sync;
 715			/* Mark no embedded frame after extracting it */
 716			piggy_desc->offset = 0;
 717		}
 718	}
 719
 720	/* Update state info */
 721	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
 722	cfhsi->rx_state.state = rx_state;
 723	cfhsi->rx_ptr = rx_ptr;
 724	cfhsi->rx_len = rx_len;
 725	cfhsi->rx_state.pld_len = desc_pld_len;
 726	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
 727
 728	if (rx_buf != cfhsi->rx_buf)
 729		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
 730	return;
 731
 732out_of_sync:
 733	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
 734	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
 735			cfhsi->rx_buf, CFHSI_DESC_SZ);
 736	schedule_work(&cfhsi->out_of_sync_work);
 737}
 738
 739static void cfhsi_rx_slowpath(struct timer_list *t)
 740{
 741	struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
 742
 743	netdev_dbg(cfhsi->ndev, "%s.\n",
 744		__func__);
 745
 746	cfhsi_rx_done(cfhsi);
 747}
 748
 749static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
 750{
 751	struct cfhsi *cfhsi;
 752
 753	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 754	netdev_dbg(cfhsi->ndev, "%s.\n",
 755		__func__);
 756
 757	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 758		return;
 759
 760	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
 761		wake_up_interruptible(&cfhsi->flush_fifo_wait);
 762	else
 763		cfhsi_rx_done(cfhsi);
 764}
 765
 766static void cfhsi_wake_up(struct work_struct *work)
 767{
 768	struct cfhsi *cfhsi = NULL;
 769	int res;
 770	int len;
 771	long ret;
 772
 773	cfhsi = container_of(work, struct cfhsi, wake_up_work);
 774
 775	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 776		return;
 777
 778	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
 779		/* This happens when wakeup is requested by
 780		 * both ends at the same time. */
 781		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 782		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 783		return;
 784	}
 785
 786	/* Activate wake line. */
 787	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
 788
 789	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
 790		__func__);
 791
 792	/* Wait for acknowledge. */
 793	ret = CFHSI_WAKE_TOUT;
 794	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
 795					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
 796							&cfhsi->bits), ret);
 797	if (unlikely(ret < 0)) {
 798		/* Interrupted by signal. */
 799		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
 800			__func__, ret);
 801
 802		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 803		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
 804		return;
 805	} else if (!ret) {
 806		bool ca_wake = false;
 807		size_t fifo_occupancy = 0;
 808
 809		/* Wakeup timeout */
 810		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
 811			__func__);
 812
 813		/* Check the FIFO to see if the modem has sent something. */
 814		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
 815					&fifo_occupancy));
 816
 817		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
 818				__func__, (unsigned) fifo_occupancy);
 819
 820		/* Check if we missed the interrupt. */
 821		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
 822							&ca_wake));
 823
 824		if (ca_wake) {
 825			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
 826				__func__);
 827
 828			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
 829			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 830
 831			/* Continue execution. */
 832			goto wake_ack;
 833		}
 834
 835		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 836		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
 837		return;
 838	}
 839wake_ack:
 840	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
 841		__func__);
 842
 843	/* Mark the link as awake and clear the wake-up request. */
 844	set_bit(CFHSI_AWAKE, &cfhsi->bits);
 845	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 846
 847	/* Resume read operation. */
 848	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
 849	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
 850
 851	if (WARN_ON(res < 0))
 852		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
 853
 854	/* Clear power up acknowledgment. */
 855	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 856
 857	spin_lock_bh(&cfhsi->lock);
 858
 859	/* Resume transmit if queues are not empty. */
 860	if (!cfhsi_tx_queue_len(cfhsi)) {
 861		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
 862			__func__);
 863		/* Start inactivity timer. */
 864		mod_timer(&cfhsi->inactivity_timer,
 865				jiffies + cfhsi->cfg.inactivity_timeout);
 866		spin_unlock_bh(&cfhsi->lock);
 867		return;
 868	}
 869
 870	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
 871		__func__);
 872
 873	spin_unlock_bh(&cfhsi->lock);
 874
 875	/* Create HSI frame. */
 876	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
 877
 878	if (likely(len > 0)) {
 879		/* Set up new transfer. */
 880		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
 881		if (WARN_ON(res < 0)) {
 882			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 883				__func__, res);
 884			cfhsi_abort_tx(cfhsi);
 885		}
 886	} else {
 887		netdev_err(cfhsi->ndev,
 888				"%s: Failed to create HSI frame: %d.\n",
 889				__func__, len);
 890	}
 891}
 892
 893static void cfhsi_wake_down(struct work_struct *work)
 894{
 895	long ret;
 896	struct cfhsi *cfhsi = NULL;
 897	size_t fifo_occupancy = 0;
 898	int retry = CFHSI_WAKE_TOUT;
 899
 900	cfhsi = container_of(work, struct cfhsi, wake_down_work);
 901	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 902
 903	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 904		return;
 905
 906	/* Deactivate wake line. */
 907	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
 908
 909	/* Wait for acknowledge. */
 910	ret = CFHSI_WAKE_TOUT;
 911	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
 912					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
 913							&cfhsi->bits), ret);
 914	if (ret < 0) {
 915		/* Interrupted by signal. */
 916		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
 917			__func__, ret);
 918		return;
 919	} else if (!ret) {
 920		bool ca_wake = true;
 921
 922		/* Timeout */
 923		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
 924
 925		/* Check if we missed the interrupt. */
 926		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
 927							&ca_wake));
 928		if (!ca_wake)
 929			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
 930				__func__);
 931	}
 932
 933	/* Check FIFO occupancy. */
 934	while (retry) {
 935		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
 936							&fifo_occupancy));
 937
 938		if (!fifo_occupancy)
 939			break;
 940
 941		set_current_state(TASK_INTERRUPTIBLE);
 942		schedule_timeout(1);
 943		retry--;
 944	}
 945
 946	if (!retry)
 947		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
 948
 949	/* Clear AWAKE condition. */
 950	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 951
 952	/* Cancel pending RX requests. */
 953	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
 954}
 955
 956static void cfhsi_out_of_sync(struct work_struct *work)
 957{
 958	struct cfhsi *cfhsi = NULL;
 959
 960	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
 961
 962	rtnl_lock();
 963	dev_close(cfhsi->ndev);
 964	rtnl_unlock();
 965}
 966
 967static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
 968{
 969	struct cfhsi *cfhsi = NULL;
 970
 971	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 972	netdev_dbg(cfhsi->ndev, "%s.\n",
 973		__func__);
 974
 975	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 976	wake_up_interruptible(&cfhsi->wake_up_wait);
 977
 978	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 979		return;
 980
 981	/* Schedule wake up work queue if the peer initiates. */
 982	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 983		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 984}
 985
 986static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
 987{
 988	struct cfhsi *cfhsi = NULL;
 989
 990	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 991	netdev_dbg(cfhsi->ndev, "%s.\n",
 992		__func__);
 993
 994	/* Initiating low power is only permitted by the host (us). */
 995	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 996	wake_up_interruptible(&cfhsi->wake_down_wait);
 997}
 998
 999static void cfhsi_aggregation_tout(struct timer_list *t)
1000{
1001	struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
1002
1003	netdev_dbg(cfhsi->ndev, "%s.\n",
1004		__func__);
1005
1006	cfhsi_start_tx(cfhsi);
1007}
1008
1009static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1010{
1011	struct cfhsi *cfhsi = NULL;
1012	int start_xfer = 0;
1013	int timer_active;
1014	int prio;
1015
1016	if (!dev)
1017		return -EINVAL;
1018
1019	cfhsi = netdev_priv(dev);
1020
1021	switch (skb->priority) {
1022	case TC_PRIO_BESTEFFORT:
1023	case TC_PRIO_FILLER:
1024	case TC_PRIO_BULK:
1025		prio = CFHSI_PRIO_BEBK;
1026		break;
1027	case TC_PRIO_INTERACTIVE_BULK:
1028		prio = CFHSI_PRIO_VI;
1029		break;
1030	case TC_PRIO_INTERACTIVE:
1031		prio = CFHSI_PRIO_VO;
1032		break;
1033	case TC_PRIO_CONTROL:
1034	default:
1035		prio = CFHSI_PRIO_CTL;
1036		break;
1037	}
1038
1039	spin_lock_bh(&cfhsi->lock);
1040
1041	/* Update aggregation statistics  */
1042	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1043
1044	/* Queue the SKB */
1045	skb_queue_tail(&cfhsi->qhead[prio], skb);
1046
1047	/* Sanity check; xmit should not be called after unregister_netdev */
1048	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
1049		spin_unlock_bh(&cfhsi->lock);
1050		cfhsi_abort_tx(cfhsi);
1051		return -EINVAL;
1052	}
1053
1054	/* Send flow off if number of packets is above high water mark. */
1055	if (!cfhsi->flow_off_sent &&
1056		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
1057		cfhsi->cfdev.flowctrl) {
1058		cfhsi->flow_off_sent = 1;
1059		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
1060	}
1061
1062	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
1063		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
1064		start_xfer = 1;
1065	}
1066
1067	if (!start_xfer) {
1068		/* Send aggregate if it is possible */
1069		bool aggregate_ready =
1070			cfhsi_can_send_aggregate(cfhsi) &&
1071			del_timer(&cfhsi->aggregation_timer) > 0;
1072		spin_unlock_bh(&cfhsi->lock);
1073		if (aggregate_ready)
1074			cfhsi_start_tx(cfhsi);
1075		return 0;
1076	}
1077
1078	/* Delete inactivity timer if started. */
1079	timer_active = del_timer_sync(&cfhsi->inactivity_timer);
1080
1081	spin_unlock_bh(&cfhsi->lock);
1082
1083	if (timer_active) {
1084		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
1085		int len;
1086		int res;
1087
1088		/* Create HSI frame. */
1089		len = cfhsi_tx_frm(desc, cfhsi);
1090		WARN_ON(!len);
1091
1092		/* Set up new transfer. */
1093		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
1094		if (WARN_ON(res < 0)) {
1095			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
1096				__func__, res);
1097			cfhsi_abort_tx(cfhsi);
1098		}
1099	} else {
1100		/* Schedule wake up work queue if we initiate the wake-up. */
1101		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
1102			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1103	}
1104
1105	return 0;
1106}
1107
1108static const struct net_device_ops cfhsi_netdevops;
1109
1110static void cfhsi_setup(struct net_device *dev)
1111{
1112	int i;
1113	struct cfhsi *cfhsi = netdev_priv(dev);
1114	dev->features = 0;
1115	dev->type = ARPHRD_CAIF;
1116	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1117	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1118	dev->priv_flags |= IFF_NO_QUEUE;
1119	dev->needs_free_netdev = true;
1120	dev->netdev_ops = &cfhsi_netdevops;
1121	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1122		skb_queue_head_init(&cfhsi->qhead[i]);
1123	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1124	cfhsi->cfdev.use_frag = false;
1125	cfhsi->cfdev.use_stx = false;
1126	cfhsi->cfdev.use_fcs = false;
1127	cfhsi->ndev = dev;
1128	cfhsi->cfg = hsi_default_config;
1129}
1130
1131static int cfhsi_open(struct net_device *ndev)
1132{
1133	struct cfhsi *cfhsi = netdev_priv(ndev);
1134	int res;
1135
1136	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1137
1138	/* Initialize state variables. */
1139	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1140	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
1141
1142	/* Set flow info */
1143	cfhsi->flow_off_sent = 0;
1144
1145	/*
1146	 * Allocate a TX buffer with the size of one HSI packet descriptor
1147	 * and the necessary room for CAIF payload frames.
1148	 */
1149	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
1150	if (!cfhsi->tx_buf) {
1151		res = -ENODEV;
1152		goto err_alloc_tx;
1153	}
1154
1155	/*
1156	 * Allocate a RX buffer with the size of two HSI packet descriptors and
1157	 * the necessary room for CAIF payload frames.
1158	 */
1159	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1160	if (!cfhsi->rx_buf) {
1161		res = -ENODEV;
1162		goto err_alloc_rx;
1163	}
1164
1165	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1166	if (!cfhsi->rx_flip_buf) {
1167		res = -ENODEV;
1168		goto err_alloc_rx_flip;
1169	}
1170
1171	/* Initialize aggregation timeout */
1172	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
1173
1174	/* Initialize receive variables. */
1175	cfhsi->rx_ptr = cfhsi->rx_buf;
1176	cfhsi->rx_len = CFHSI_DESC_SZ;
1177
1178	/* Initialize spin locks. */
1179	spin_lock_init(&cfhsi->lock);
1180
1181	/* Set up the driver. */
1182	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
1183	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
1184	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
1185	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
1186
1187	/* Initialize the work queues. */
1188	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1189	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
1190	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
1191
1192	/* Clear all bit fields. */
1193	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1194	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1195	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1196	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1197
1198	/* Create the work queue. */
1199	cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
1200	if (!cfhsi->wq) {
1201		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
1202			__func__);
1203		res = -ENODEV;
1204		goto err_create_wq;
1205	}
1206
1207	/* Initialize wait queues. */
1208	init_waitqueue_head(&cfhsi->wake_up_wait);
1209	init_waitqueue_head(&cfhsi->wake_down_wait);
1210	init_waitqueue_head(&cfhsi->flush_fifo_wait);
1211
1212	/* Setup the inactivity timer. */
1213	timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
1214	/* Setup the slowpath RX timer. */
1215	timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
1216	/* Setup the aggregation timer. */
1217	timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
1218
1219	/* Activate HSI interface. */
1220	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
1221	if (res) {
1222		netdev_err(cfhsi->ndev,
1223			"%s: can't activate HSI interface: %d.\n",
1224			__func__, res);
1225		goto err_activate;
1226	}
1227
1228	/* Flush FIFO */
1229	res = cfhsi_flush_fifo(cfhsi);
1230	if (res) {
1231		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
1232			__func__, res);
1233		goto err_net_reg;
1234	}
1235	return res;
1236
1237 err_net_reg:
1238	cfhsi->ops->cfhsi_down(cfhsi->ops);
1239 err_activate:
1240	destroy_workqueue(cfhsi->wq);
1241 err_create_wq:
1242	kfree(cfhsi->rx_flip_buf);
1243 err_alloc_rx_flip:
1244	kfree(cfhsi->rx_buf);
1245 err_alloc_rx:
1246	kfree(cfhsi->tx_buf);
1247 err_alloc_tx:
1248	return res;
1249}
1250
1251static int cfhsi_close(struct net_device *ndev)
1252{
1253	struct cfhsi *cfhsi = netdev_priv(ndev);
1254	u8 *tx_buf, *rx_buf, *flip_buf;
1255
1256	/* Going to shut down the driver. */
1257	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1258
1259	/* Delete timers if pending */
1260	del_timer_sync(&cfhsi->inactivity_timer);
1261	del_timer_sync(&cfhsi->rx_slowpath_timer);
1262	del_timer_sync(&cfhsi->aggregation_timer);
1263
1264	/* Cancel pending RX request (if any) */
1265	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
1266
1267	/* Destroy workqueue */
1268	destroy_workqueue(cfhsi->wq);
1269
1270	/* Store buffers: they will be freed later. */
1271	tx_buf = cfhsi->tx_buf;
1272	rx_buf = cfhsi->rx_buf;
1273	flip_buf = cfhsi->rx_flip_buf;
1274	/* Flush transmit queues. */
1275	cfhsi_abort_tx(cfhsi);
1276
1277	/* Deactivate interface */
1278	cfhsi->ops->cfhsi_down(cfhsi->ops);
1279
1280	/* Free buffers. */
1281	kfree(tx_buf);
1282	kfree(rx_buf);
1283	kfree(flip_buf);
1284	return 0;
1285}
1286
1287static void cfhsi_uninit(struct net_device *dev)
1288{
1289	struct cfhsi *cfhsi = netdev_priv(dev);
1290	ASSERT_RTNL();
1291	symbol_put(cfhsi_get_device);
1292	list_del(&cfhsi->list);
1293}
1294
1295static const struct net_device_ops cfhsi_netdevops = {
1296	.ndo_uninit = cfhsi_uninit,
1297	.ndo_open = cfhsi_open,
1298	.ndo_stop = cfhsi_close,
1299	.ndo_start_xmit = cfhsi_xmit
1300};
1301
1302static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
1303{
1304	int i;
1305
1306	if (!data) {
1307		pr_debug("no params data found\n");
1308		return;
1309	}
1310
1311	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
1312	/*
1313	 * Inactivity timeout is given in milliseconds; after conversion to
1314	 * jiffies it is clamped to [1, NEXT_TIMER_MAX_DELTA].
1315	 */
1316	if (data[i]) {
1317		u32 inactivity_timeout = nla_get_u32(data[i]);
1318		/* Pre-calculate inactivity timeout. */
1319		cfhsi->cfg.inactivity_timeout =	inactivity_timeout * HZ / 1000;
1320		if (cfhsi->cfg.inactivity_timeout == 0)
1321			cfhsi->cfg.inactivity_timeout = 1;
1322		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1323			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1324	}
1325
1326	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
1327	if (data[i])
1328		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
1329
1330	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
1331	if (data[i])
1332		cfhsi->cfg.head_align = nla_get_u32(data[i]);
1333
1334	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
1335	if (data[i])
1336		cfhsi->cfg.tail_align = nla_get_u32(data[i]);
1337
1338	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
1339	if (data[i])
1340		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
1341
1342	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
1343	if (data[i])
1344		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
1345}
1346
1347static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
1348			       struct nlattr *data[],
1349			       struct netlink_ext_ack *extack)
1350{
1351	cfhsi_netlink_parms(data, netdev_priv(dev));
1352	netdev_state_change(dev);
1353	return 0;
1354}
1355
1356static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
1357	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
1358	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
1359	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
1360	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
1361	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
1362	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
1363};
1364
1365static size_t caif_hsi_get_size(const struct net_device *dev)
1366{
1367	int i;
1368	size_t s = 0;
1369	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
1370		s += nla_total_size(caif_hsi_policy[i].len);
1371	return s;
1372}
1373
1374static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
1375{
1376	struct cfhsi *cfhsi = netdev_priv(dev);
1377
1378	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
1379			cfhsi->cfg.inactivity_timeout) ||
1380	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
1381			cfhsi->cfg.aggregation_timeout) ||
1382	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
1383			cfhsi->cfg.head_align) ||
1384	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
1385			cfhsi->cfg.tail_align) ||
1386	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
1387			cfhsi->cfg.q_high_mark) ||
1388	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
1389			cfhsi->cfg.q_low_mark))
1390		return -EMSGSIZE;
1391
1392	return 0;
1393}
1394
1395static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1396			    struct nlattr *tb[], struct nlattr *data[],
1397			    struct netlink_ext_ack *extack)
1398{
1399	struct cfhsi *cfhsi = NULL;
1400	struct cfhsi_ops *(*get_ops)(void);
1401
1402	ASSERT_RTNL();
1403
1404	cfhsi = netdev_priv(dev);
1405	cfhsi_netlink_parms(data, cfhsi);
1406
1407	get_ops = symbol_get(cfhsi_get_ops);
1408	if (!get_ops) {
1409		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1410		return -ENODEV;
1411	}
1412
1413	/* Assign the HSI device. */
1414	cfhsi->ops = (*get_ops)();
1415	if (!cfhsi->ops) {
1416		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1417		goto err;
1418	}
1419
1420	/* Assign the driver to this HSI device. */
1421	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
1422	if (register_netdevice(dev)) {
1423		pr_warn("%s: caif_hsi device registration failed\n", __func__);
1424		goto err;
1425	}
1426	/* Add CAIF HSI device to list. */
1427	list_add_tail(&cfhsi->list, &cfhsi_list);
1428
1429	return 0;
1430err:
1431	symbol_put(cfhsi_get_ops);
1432	return -ENODEV;
1433}
1434
1435static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
1436	.kind		= "cfhsi",
1437	.priv_size	= sizeof(struct cfhsi),
1438	.setup		= cfhsi_setup,
1439	.maxtype	= __IFLA_CAIF_HSI_MAX,
1440	.policy	= caif_hsi_policy,
1441	.newlink	= caif_hsi_newlink,
1442	.changelink	= caif_hsi_changelink,
1443	.get_size	= caif_hsi_get_size,
1444	.fill_info	= caif_hsi_fill_info,
1445};
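    /*
     * User-space note (illustration only; assumes an iproute2 build that can
     * pass the "cfhsi" link kind): a device of this kind can be created over
     * rtnetlink, e.g. "ip link add dev cfhsi0 type cfhsi", where the name
     * cfhsi0 is just an example.
     */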
1446
1447static void __exit cfhsi_exit_module(void)
1448{
1449	struct list_head *list_node;
1450	struct list_head *n;
1451	struct cfhsi *cfhsi;
1452
1453	rtnl_link_unregister(&caif_hsi_link_ops);
1454
1455	rtnl_lock();
1456	list_for_each_safe(list_node, n, &cfhsi_list) {
1457		cfhsi = list_entry(list_node, struct cfhsi, list);
1458		unregister_netdevice(cfhsi->ndev);
1459	}
1460	rtnl_unlock();
1461}
1462
1463static int __init cfhsi_init_module(void)
1464{
1465	return rtnl_link_register(&caif_hsi_link_ops);
1466}
1467
1468module_init(cfhsi_init_module);
1469module_exit(cfhsi_exit_module);