   1/*
   2 * Copyright (C) ST-Ericsson AB 2010
   3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
   4 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
   5 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
   6 * License terms: GNU General Public License (GPL) version 2.
   7 */
   8
   9#include <linux/init.h>
  10#include <linux/module.h>
  11#include <linux/device.h>
  12#include <linux/platform_device.h>
  13#include <linux/netdevice.h>
  14#include <linux/string.h>
  15#include <linux/list.h>
  16#include <linux/interrupt.h>
  17#include <linux/delay.h>
  18#include <linux/sched.h>
  19#include <linux/if_arp.h>
  20#include <linux/timer.h>
  21#include <net/caif/caif_layer.h>
  22#include <net/caif/caif_hsi.h>
  23
  24MODULE_LICENSE("GPL");
  25MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
  26MODULE_DESCRIPTION("CAIF HSI driver");
  27
  28/* Returns the number of padding bytes for alignment. */
  29#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
  30				(((pow)-((x)&((pow)-1)))))
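    /* For example, PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0. */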
  31
  32/*
  33 * HSI padding options.
  34 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
  35 */
  36static int hsi_head_align = 4;
  37module_param(hsi_head_align, int, S_IRUGO);
  38MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
  39
  40static int hsi_tail_align = 4;
  41module_param(hsi_tail_align, int, S_IRUGO);
  42MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
  43
  44/*
  45 * HSI link layer flowcontrol thresholds.
  46 * Warning: A high threshold value might increase throughput but it will at
  47 * the same time prevent channel prioritization and increase the risk of
  48 * flooding the modem. The high threshold should be above the low.
  49 */
  50static int hsi_high_threshold = 100;
  51module_param(hsi_high_threshold, int, S_IRUGO);
  52MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
  53
  54static int hsi_low_threshold = 50;
  55module_param(hsi_low_threshold, int, S_IRUGO);
  56MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
  57
  58#define ON 1
  59#define OFF 0
  60
  61/*
  62 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
  63 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
  64 * de-asserted before the number of packets drops below LOW_WATER_MARK.
  65 */
  66#define LOW_WATER_MARK   hsi_low_threshold
  67#define HIGH_WATER_MARK  hsi_high_threshold
  68
  69static LIST_HEAD(cfhsi_list);
  70static spinlock_t cfhsi_list_lock;
  71
  72static void cfhsi_inactivity_tout(unsigned long arg)
  73{
  74	struct cfhsi *cfhsi = (struct cfhsi *)arg;
  75
  76	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
  77		__func__);
  78
  79	/* Schedule power down work queue. */
  80	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
  81		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
  82}
  83
  84static void cfhsi_abort_tx(struct cfhsi *cfhsi)
  85{
  86	struct sk_buff *skb;
  87
  88	for (;;) {
  89		spin_lock_bh(&cfhsi->lock);
  90		skb = skb_dequeue(&cfhsi->qhead);
  91		if (!skb)
  92			break;
  93
  94		cfhsi->ndev->stats.tx_errors++;
  95		cfhsi->ndev->stats.tx_dropped++;
  96		spin_unlock_bh(&cfhsi->lock);
  97		kfree_skb(skb);
  98	}
  99	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 100	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 101		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
 102	spin_unlock_bh(&cfhsi->lock);
 103}
 104
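    /*
     * Drain any stale data left in the HSI RX FIFO: wake the interface,
     * read small chunks until the reported FIFO occupancy reaches zero,
     * then drop the wake line again.
     */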
 105static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 106{
 107	char buffer[32]; /* Any reasonable value */
 108	size_t fifo_occupancy;
 109	int ret;
 110
 111	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 112		__func__);
 113
 114
 115	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
 116	if (ret) {
 117		dev_warn(&cfhsi->ndev->dev,
 118			"%s: can't wake up HSI interface: %d.\n",
 119			__func__, ret);
 120		return ret;
 121	}
 122
 123	do {
 124		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 125				&fifo_occupancy);
 126		if (ret) {
 127			dev_warn(&cfhsi->ndev->dev,
 128				"%s: can't get FIFO occupancy: %d.\n",
 129				__func__, ret);
 130			break;
 131		} else if (!fifo_occupancy)
 132			/* No more data, exiting normally */
 133			break;
 134
 135		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
 136		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 137		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
 138				cfhsi->dev);
 139		if (ret) {
 140			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 141			dev_warn(&cfhsi->ndev->dev,
 142				"%s: can't read data: %d.\n",
 143				__func__, ret);
 144			break;
 145		}
 146
 147		ret = 5 * HZ;
 148		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
 149			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
 150
 151		if (ret < 0) {
 152			dev_warn(&cfhsi->ndev->dev,
 153				"%s: can't wait for flush complete: %d.\n",
 154				__func__, ret);
 155			break;
 156		} else if (!ret) {
 157			ret = -ETIMEDOUT;
 158			dev_warn(&cfhsi->ndev->dev,
 159				"%s: timeout waiting for flush complete.\n",
 160				__func__);
 161			break;
 162		}
 163	} while (1);
 164
 165	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 166
 167	return ret;
 168}
 169
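    /*
     * Build an outgoing HSI frame in desc: the descriptor itself, optionally
     * one small CAIF frame embedded inside the descriptor, and up to
     * CFHSI_MAX_PKTS CAIF frames appended after it. Each appended frame is
     * preceded by head padding whose length (minus one) is stored in its
     * first byte. The CFHSI_PIGGY_DESC bit is set if more packets remain
     * queued. Returns the total transfer length, or 0 if the queue is empty.
     */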
 170static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 171{
 172	int nfrms = 0;
 173	int pld_len = 0;
 174	struct sk_buff *skb;
 175	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 176
 177	skb = skb_dequeue(&cfhsi->qhead);
 178	if (!skb)
 179		return 0;
 180
 181	/* Check if we can embed a CAIF frame. */
 182	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
 183		struct caif_payload_info *info;
 184		int hpad = 0;
 185		int tpad = 0;
 186
 187		/* Calculate needed head alignment and tail alignment. */
 188		info = (struct caif_payload_info *)&skb->cb;
 189
 190		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
 191		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
 192
 193		/* Check if frame still fits with added alignment. */
 194		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
 195			u8 *pemb = desc->emb_frm;
 196			desc->offset = CFHSI_DESC_SHORT_SZ;
 197			*pemb = (u8)(hpad - 1);
 198			pemb += hpad;
 199
 200			/* Update network statistics. */
 201			cfhsi->ndev->stats.tx_packets++;
 202			cfhsi->ndev->stats.tx_bytes += skb->len;
 203
 204			/* Copy in embedded CAIF frame. */
 205			skb_copy_bits(skb, 0, pemb, skb->len);
 206			consume_skb(skb);
 207			skb = NULL;
 208		}
 209	} else
 210		/* Clear offset. */
 211		desc->offset = 0;
 212
 213	/* Create payload CAIF frames. */
 214	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 215	while (nfrms < CFHSI_MAX_PKTS) {
 216		struct caif_payload_info *info;
 217		int hpad = 0;
 218		int tpad = 0;
 219
 220		if (!skb)
 221			skb = skb_dequeue(&cfhsi->qhead);
 222
 223		if (!skb)
 224			break;
 225
 226		/* Calculate needed head alignment and tail alignment. */
 227		info = (struct caif_payload_info *)&skb->cb;
 228
 229		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
 230		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
 231
 232		/* Fill in CAIF frame length in descriptor. */
 233		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
 234
 235		/* Fill head padding information. */
 236		*pfrm = (u8)(hpad - 1);
 237		pfrm += hpad;
 238
 239		/* Update network statistics. */
 240		cfhsi->ndev->stats.tx_packets++;
 241		cfhsi->ndev->stats.tx_bytes += skb->len;
 242
 243		/* Copy in CAIF frame. */
 244		skb_copy_bits(skb, 0, pfrm, skb->len);
 245
 246		/* Update payload length. */
 247		pld_len += desc->cffrm_len[nfrms];
 248
 249		/* Update frame pointer. */
 250		pfrm += skb->len + tpad;
 251		consume_skb(skb);
 252		skb = NULL;
 253
 254		/* Update number of frames. */
 255		nfrms++;
 256	}
 257
 258	/* Unused length fields should be zero-filled (according to SPEC). */
 259	while (nfrms < CFHSI_MAX_PKTS) {
 260		desc->cffrm_len[nfrms] = 0x0000;
 261		nfrms++;
 262	}
 263
 264	/* Check if we can piggy-back another descriptor. */
 265	skb = skb_peek(&cfhsi->qhead);
 266	if (skb)
 267		desc->header |= CFHSI_PIGGY_DESC;
 268	else
 269		desc->header &= ~CFHSI_PIGGY_DESC;
 270
 271	return CFHSI_DESC_SZ + pld_len;
 272}
 273
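    /*
     * TX done work function, queued from the TX done callback: signal
     * flow-on once the queue has drained below the low water mark, then
     * build and start the next transfer. When the queue is empty the TX
     * state goes idle and the inactivity timer is restarted.
     */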
 274static void cfhsi_tx_done_work(struct work_struct *work)
 275{
 276	struct cfhsi *cfhsi = NULL;
 277	struct cfhsi_desc *desc = NULL;
 278	int len = 0;
 279	int res;
 280
 281	cfhsi = container_of(work, struct cfhsi, tx_done_work);
 282	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 283		__func__);
 284
 285	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 286		return;
 287
 288	desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 289
 290	do {
 291		/*
 292		 * Send flow on if flow off has been previously signalled
 293		 * and number of packets is below low water mark.
 294		 */
 295		spin_lock_bh(&cfhsi->lock);
 296		if (cfhsi->flow_off_sent &&
 297				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
 298				cfhsi->cfdev.flowctrl) {
 299
 300			cfhsi->flow_off_sent = 0;
 301			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
 302		}
 303		spin_unlock_bh(&cfhsi->lock);
 304
 305		/* Create HSI frame. */
 306		len = cfhsi_tx_frm(desc, cfhsi);
 307		if (!len) {
 308			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 309			/* Start inactivity timer. */
 310			mod_timer(&cfhsi->timer,
 311					jiffies + CFHSI_INACTIVITY_TOUT);
 312			break;
 313		}
 314
 315		/* Set up new transfer. */
 316		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 317		if (WARN_ON(res < 0)) {
 318			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 319				__func__, res);
 320		}
 321	} while (res < 0);
 322}
 323
 324static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
 325{
 326	struct cfhsi *cfhsi;
 327
 328	cfhsi = container_of(drv, struct cfhsi, drv);
 329	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 330		__func__);
 331
 332	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 333		return;
 334
 335	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
 336}
 337
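    /*
     * Parse a received HSI descriptor: deliver the embedded CAIF frame (if
     * any) to the network stack and return the length of the payload
     * transfer that follows, i.e. the sum of the frame lengths in the
     * descriptor plus an extra descriptor if the piggy-back bit is set.
     */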
 338static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 339{
 340	int xfer_sz = 0;
 341	int nfrms = 0;
 342	u16 *plen = NULL;
 343	u8 *pfrm = NULL;
 344
 345	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 346			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 347		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
 348			__func__);
 349		return 0;
 350	}
 351
 352	/* Check for embedded CAIF frame. */
 353	if (desc->offset) {
 354		struct sk_buff *skb;
 355		u8 *dst = NULL;
 356		int len = 0, retries = 0;
 357		pfrm = ((u8 *)desc) + desc->offset;
 358
 359		/* Remove offset padding. */
 360		pfrm += *pfrm + 1;
 361
 362		/* Read length of CAIF frame (little endian). */
 363		len = *pfrm;
 364		len |= ((*(pfrm+1)) << 8) & 0xFF00;
 365		len += 2;	/* Add FCS fields. */
 366
 367
 368		/* Allocate SKB (called from work queue context, may sleep). */
 369		skb = alloc_skb(len + 1, GFP_KERNEL);
 370		while (!skb) {
 371			retries++;
 372			schedule_timeout(1);
 373			skb = alloc_skb(len + 1, GFP_KERNEL);
 374			if (skb) {
 375				printk(KERN_WARNING "%s: slept for %u "
 376						"before getting memory\n",
 377						__func__, retries);
 378				break;
 379			}
 380			if (retries > HZ) {
 381				printk(KERN_ERR "%s: slept for 1HZ and "
 382						"did not get memory\n",
 383						__func__);
 384				cfhsi->ndev->stats.rx_dropped++;
 385				goto drop_frame;
 386			}
 387		}
 388		caif_assert(skb != NULL);
 389
 390		dst = skb_put(skb, len);
 391		memcpy(dst, pfrm, len);
 392
 393		skb->protocol = htons(ETH_P_CAIF);
 394		skb_reset_mac_header(skb);
 395		skb->dev = cfhsi->ndev;
 396
 397		/*
 398		 * We are called from an arch-specific platform device.
 399		 * Unfortunately we don't know what context we're
 400		 * running in.
 401		 */
 402		if (in_interrupt())
 403			netif_rx(skb);
 404		else
 405			netif_rx_ni(skb);
 406
 407		/* Update network statistics. */
 408		cfhsi->ndev->stats.rx_packets++;
 409		cfhsi->ndev->stats.rx_bytes += len;
 410	}
 411
 412drop_frame:
 413	/* Calculate transfer length. */
 414	plen = desc->cffrm_len;
 415	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 416		xfer_sz += *plen;
 417		plen++;
 418		nfrms++;
 419	}
 420
 421	/* Check for piggy-backed descriptor. */
 422	if (desc->header & CFHSI_PIGGY_DESC)
 423		xfer_sz += CFHSI_DESC_SZ;
 424
 425	if (xfer_sz % 4) {
 426		dev_err(&cfhsi->ndev->dev,
 427				"%s: Invalid payload len: %d, ignored.\n",
 428			__func__, xfer_sz);
 429		xfer_sz = 0;
 430	}
 431
 432	return xfer_sz;
 433}
 434
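    /*
     * Parse the payload part of a received HSI frame: walk the frame length
     * array in the descriptor, deliver each CAIF frame to the network stack
     * and return the number of payload bytes consumed.
     */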
 435static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 436{
 437	int rx_sz = 0;
 438	int nfrms = 0;
 439	u16 *plen = NULL;
 440	u8 *pfrm = NULL;
 441
 442	/* Sanity check header and offset. */
 443	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
 444			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
 445		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
 446			__func__);
 447		return -EINVAL;
 448	}
 449
 450	/* Set frame pointer to start of payload. */
 451	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 452	plen = desc->cffrm_len;
 453	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 454		struct sk_buff *skb;
 455		u8 *dst = NULL;
 456		u8 *pcffrm = NULL;
 457		int len = 0, retries = 0;
 458
 459		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
 460			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
 461				__func__);
 462			return -EINVAL;
 463		}
 464
 465		/* CAIF frame starts after head padding. */
 466		pcffrm = pfrm + *pfrm + 1;
 467
 468		/* Read length of CAIF frame (little endian). */
 469		len = *pcffrm;
 470		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
 471		len += 2;	/* Add FCS fields. */
 472
 473		/* Allocate SKB (called from work queue context, may sleep). */
 474		skb = alloc_skb(len + 1, GFP_KERNEL);
 475		while (!skb) {
 476			retries++;
 477			schedule_timeout(1);
 478			skb = alloc_skb(len + 1, GFP_KERNEL);
 479			if (skb) {
 480				printk(KERN_WARNING "%s: slept for %u "
 481						"before getting memory\n",
 482						__func__, retries);
 483				break;
 484			}
 485			if (retries > HZ) {
 486				printk(KERN_ERR "%s: slept for 1HZ "
 487						"and did not get memory\n",
 488						__func__);
 489				cfhsi->ndev->stats.rx_dropped++;
 490				goto drop_frame;
 491			}
 492		}
 493		caif_assert(skb != NULL);
 494
 495		dst = skb_put(skb, len);
 496		memcpy(dst, pcffrm, len);
 497
 498		skb->protocol = htons(ETH_P_CAIF);
 499		skb_reset_mac_header(skb);
 500		skb->dev = cfhsi->ndev;
 501
 502		/*
 503		 * We're called from a platform device,
 504		 * and don't know the context we're running in.
 505		 */
 506		if (in_interrupt())
 507			netif_rx(skb);
 508		else
 509			netif_rx_ni(skb);
 510
 511		/* Update network statistics. */
 512		cfhsi->ndev->stats.rx_packets++;
 513		cfhsi->ndev->stats.rx_bytes += len;
 514
 515drop_frame:
 516		pfrm += *plen;
 517		rx_sz += *plen;
 518		plen++;
 519		nfrms++;
 520	}
 521
 522	return rx_sz;
 523}
 524
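    /*
     * RX done work function, queued from the RX done callback: parse either
     * the descriptor or the payload part (including a possible piggy-backed
     * descriptor) and set up the next RX transfer.
     */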
 525static void cfhsi_rx_done_work(struct work_struct *work)
 526{
 527	int res;
 528	int desc_pld_len = 0;
 529	struct cfhsi *cfhsi = NULL;
 530	struct cfhsi_desc *desc = NULL;
 531
 532	cfhsi = container_of(work, struct cfhsi, rx_done_work);
 533	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
 534
 535	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
 536		__func__);
 537
 538	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 539		return;
 540
 541	/* Update inactivity timer if pending. */
 542	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
 543
 544	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
 545		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
 546	} else {
 547		int pld_len;
 548
 549		pld_len = cfhsi_rx_pld(desc, cfhsi);
 550
 551		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
 552			struct cfhsi_desc *piggy_desc;
 553			piggy_desc = (struct cfhsi_desc *)
 554				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
 555						pld_len);
 556
 557			/* Extract piggy-backed descriptor. */
 558			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
 559
 560			/*
 561			 * Copy needed information from the piggy-backed
 562			 * descriptor to the descriptor in the start.
 563			 */
 564			memcpy((u8 *)desc, (u8 *)piggy_desc,
 565					CFHSI_DESC_SHORT_SZ);
 566		}
 567	}
 568
 569	if (desc_pld_len) {
 570		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
 571		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
 572		cfhsi->rx_len = desc_pld_len;
 573	} else {
 574		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
 575		cfhsi->rx_ptr = cfhsi->rx_buf;
 576		cfhsi->rx_len = CFHSI_DESC_SZ;
 577	}
 578	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
 579
 580	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
 581		/* Set up new transfer. */
 582		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
 583			__func__);
 584		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
 585				cfhsi->dev);
 586		if (WARN_ON(res < 0)) {
 587			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
 588				__func__, res);
 589			cfhsi->ndev->stats.rx_errors++;
 590			cfhsi->ndev->stats.rx_dropped++;
 591		}
 592	}
 593}
 594
 595static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
 596{
 597	struct cfhsi *cfhsi;
 598
 599	cfhsi = container_of(drv, struct cfhsi, drv);
 600	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 601		__func__);
 602
 603	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 604		return;
 605
 606	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
 607
 608	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
 609		wake_up_interruptible(&cfhsi->flush_fifo_wait);
 610	else
 611		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
 612}
 613
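    /*
     * Wake-up work function: assert the wake line, wait for the peer's
     * acknowledgement, restart RX and, if packets are queued, start a TX
     * transfer. If the queue is empty only the inactivity timer is armed.
     */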
 614static void cfhsi_wake_up(struct work_struct *work)
 615{
 616	struct cfhsi *cfhsi = NULL;
 617	int res;
 618	int len;
 619	long ret;
 620
 621	cfhsi = container_of(work, struct cfhsi, wake_up_work);
 622
 623	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 624		return;
 625
 626	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
 627		/* This happens when wakeup is requested by
 628		 * both ends at the same time. */
 629		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 630		return;
 631	}
 632
 633	/* Activate wake line. */
 634	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
 635
 636	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
 637		__func__);
 638
 639	/* Wait for acknowledge. */
 640	ret = CFHSI_WAKEUP_TOUT;
 641	wait_event_interruptible_timeout(cfhsi->wake_up_wait,
 642					test_bit(CFHSI_WAKE_UP_ACK,
 643							&cfhsi->bits), ret);
 644	if (unlikely(ret < 0)) {
 645		/* Interrupted by signal. */
 646		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
 647			__func__, ret);
 648		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 649		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 650		return;
 651	} else if (!ret) {
 652		/* Wakeup timeout */
 653		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
 654			__func__);
 655		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 656		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 657		return;
 658	}
 659	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
 660		__func__);
 661
 662	/* Set the awake bit and clear the wake-up request. */
 663	set_bit(CFHSI_AWAKE, &cfhsi->bits);
 664	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 665
 666	/* Resume read operation. */
 667	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
 668		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
 669			__func__);
 670		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
 671				cfhsi->rx_len, cfhsi->dev);
 672		if (WARN_ON(res < 0)) {
 673			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
 674				__func__, res);
 675		}
 676	}
 677
 678	/* Clear power up acknowledgment. */
 679	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 680
 681	spin_lock_bh(&cfhsi->lock);
 682
 683	/* Resume transmit if queue is not empty. */
 684	if (!skb_peek(&cfhsi->qhead)) {
 685		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
 686			__func__);
 687		/* Start inactivity timer. */
 688		mod_timer(&cfhsi->timer,
 689				jiffies + CFHSI_INACTIVITY_TOUT);
 690		spin_unlock_bh(&cfhsi->lock);
 691		return;
 692	}
 693
 694	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
 695		__func__);
 696
 697	spin_unlock_bh(&cfhsi->lock);
 698
 699	/* Create HSI frame. */
 700	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
 701
 702	if (likely(len > 0)) {
 703		/* Set up new transfer. */
 704		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 705		if (WARN_ON(res < 0)) {
 706			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 707				__func__, res);
 708			cfhsi_abort_tx(cfhsi);
 709		}
 710	} else {
 711		dev_err(&cfhsi->ndev->dev,
 712				"%s: Failed to create HSI frame: %d.\n",
 713				__func__, len);
 714	}
 715
 716}
 717
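    /*
     * Power-down work function, scheduled by the inactivity timer: if the
     * RX FIFO is not empty the timer is simply restarted; otherwise pending
     * RX is cancelled, the wake line is de-asserted and the peer's
     * acknowledgement awaited. A forced wake-up is scheduled if data has
     * arrived in the FIFO in the meantime.
     */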
 718static void cfhsi_wake_down(struct work_struct *work)
 719{
 720	long ret;
 721	struct cfhsi *cfhsi = NULL;
 722	size_t fifo_occupancy;
 723
 724	cfhsi = container_of(work, struct cfhsi, wake_down_work);
 725	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 726		__func__);
 727
 728	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 729		return;
 730
 731	/* Check if there is something in FIFO. */
 732	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 733							&fifo_occupancy)))
 734		fifo_occupancy = 0;
 735
 736	if (fifo_occupancy) {
 737		dev_dbg(&cfhsi->ndev->dev,
 738				"%s: %u words in RX FIFO, restart timer.\n",
 739				__func__, (unsigned) fifo_occupancy);
 740		spin_lock_bh(&cfhsi->lock);
 741		mod_timer(&cfhsi->timer,
 742				jiffies + CFHSI_INACTIVITY_TOUT);
 743		spin_unlock_bh(&cfhsi->lock);
 744		return;
 745	}
 746
 747	/* Cancel pending RX requests */
 748	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
 749
 750	/* Deactivate wake line. */
 751	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 752
 753	/* Wait for acknowledge. */
 754	ret = CFHSI_WAKEUP_TOUT;
 755	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
 756					test_bit(CFHSI_WAKE_DOWN_ACK,
 757							&cfhsi->bits),
 758					ret);
 759	if (ret < 0) {
 760		/* Interrupted by signal. */
 761		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
 762			__func__, ret);
 763		return;
 764	} else if (!ret) {
 765		/* Timeout */
 766		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
 767			__func__);
 768	}
 769
 770	/* Clear power down acknowledgment. */
 771	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 772	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 773
 774	/* Check if there is something in FIFO. */
 775	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 776							&fifo_occupancy)))
 777		fifo_occupancy = 0;
 778
 779	if (fifo_occupancy) {
 780		dev_dbg(&cfhsi->ndev->dev,
 781				"%s: %u words in RX FIFO, wakeup forced.\n",
 782				__func__, (unsigned) fifo_occupancy);
 783		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 784			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 785	} else
 786		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
 787			__func__);
 788}
 789
 790static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
 791{
 792	struct cfhsi *cfhsi = NULL;
 793
 794	cfhsi = container_of(drv, struct cfhsi, drv);
 795	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 796		__func__);
 797
 798	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 799	wake_up_interruptible(&cfhsi->wake_up_wait);
 800
 801	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 802		return;
 803
 804	/* Schedule wake up work queue if the peer initiates. */
 805	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 806		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 807}
 808
 809static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
 810{
 811	struct cfhsi *cfhsi = NULL;
 812
 813	cfhsi = container_of(drv, struct cfhsi, drv);
 814	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 815		__func__);
 816
 817	/* Only the host (us) is permitted to initiate low power. */
 818	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 819	wake_up_interruptible(&cfhsi->wake_down_wait);
 820}
 821
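    /*
     * ndo_start_xmit handler: queue the skb, signal flow-off above the high
     * water mark, then either start a transfer directly (the interface was
     * awake with the inactivity timer running) or schedule the wake-up
     * worker.
     */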
 822static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 823{
 824	struct cfhsi *cfhsi = NULL;
 825	int start_xfer = 0;
 826	int timer_active;
 827
 828	if (!dev)
 829		return -EINVAL;
 830
 831	cfhsi = netdev_priv(dev);
 832
 833	spin_lock_bh(&cfhsi->lock);
 834
 835	skb_queue_tail(&cfhsi->qhead, skb);
 836
 837	/* Sanity check; xmit should not be called after unregister_netdev */
 838	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
 839		spin_unlock_bh(&cfhsi->lock);
 840		cfhsi_abort_tx(cfhsi);
 841		return -EINVAL;
 842	}
 843
 844	/* Send flow off if number of packets is above high water mark. */
 845	if (!cfhsi->flow_off_sent &&
 846		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
 847		cfhsi->cfdev.flowctrl) {
 848		cfhsi->flow_off_sent = 1;
 849		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
 850	}
 851
 852	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
 853		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
 854		start_xfer = 1;
 855	}
 856
 857	spin_unlock_bh(&cfhsi->lock);
 858
 859	if (!start_xfer)
 860		return 0;
 861
 862	/* Delete inactivity timer if started. */
 863#ifdef CONFIG_SMP
 864	timer_active = del_timer_sync(&cfhsi->timer);
 865#else
 866	timer_active = del_timer(&cfhsi->timer);
 867#endif /* CONFIG_SMP */
 868
 869	if (timer_active) {
 870		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 871		int len;
 872		int res;
 873
 874		/* Create HSI frame. */
 875		len = cfhsi_tx_frm(desc, cfhsi);
 876		BUG_ON(!len);
 877
 878		/* Set up new transfer. */
 879		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 880		if (WARN_ON(res < 0)) {
 881			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 882				__func__, res);
 883			cfhsi_abort_tx(cfhsi);
 884		}
 885	} else {
 886		/* Schedule wake up work queue if we initiate. */
 887		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 888			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 889	}
 890
 891	return 0;
 892}
 893
 894static int cfhsi_open(struct net_device *dev)
 895{
 896	netif_wake_queue(dev);
 897
 898	return 0;
 899}
 900
 901static int cfhsi_close(struct net_device *dev)
 902{
 903	netif_stop_queue(dev);
 904
 905	return 0;
 906}
 907
 908static const struct net_device_ops cfhsi_ops = {
 909	.ndo_open = cfhsi_open,
 910	.ndo_stop = cfhsi_close,
 911	.ndo_start_xmit = cfhsi_xmit
 912};
 913
 914static void cfhsi_setup(struct net_device *dev)
 915{
 916	struct cfhsi *cfhsi = netdev_priv(dev);
 917	dev->features = 0;
 918	dev->netdev_ops = &cfhsi_ops;
 919	dev->type = ARPHRD_CAIF;
 920	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 921	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
 922	dev->tx_queue_len = 0;
 923	dev->destructor = free_netdev;
 924	skb_queue_head_init(&cfhsi->qhead);
 925	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
 926	cfhsi->cfdev.use_frag = false;
 927	cfhsi->cfdev.use_stx = false;
 928	cfhsi->cfdev.use_fcs = false;
 929	cfhsi->ndev = dev;
 930}
 931
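    /*
     * Probe: allocate the CAIF HSI network device and its TX/RX buffers,
     * hook up the driver callbacks, activate and flush the HSI interface
     * and register the network device.
     */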
 932int cfhsi_probe(struct platform_device *pdev)
 933{
 934	struct cfhsi *cfhsi = NULL;
 935	struct net_device *ndev;
 936	struct cfhsi_dev *dev;
 937	int res;
 938
 939	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
 940	if (!ndev) {
 941		dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
 942			__func__);
 943		return -ENODEV;
 944	}
 945
 946	cfhsi = netdev_priv(ndev);
 947	cfhsi->ndev = ndev;
 948	cfhsi->pdev = pdev;
 949
 950	/* Initialize state variables. */
 951	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 952	cfhsi->rx_state = CFHSI_RX_STATE_DESC;
 953
 954	/* Set flow info */
 955	cfhsi->flow_off_sent = 0;
 956	cfhsi->q_low_mark = LOW_WATER_MARK;
 957	cfhsi->q_high_mark = HIGH_WATER_MARK;
 958
 959	/* Assign the HSI device. */
 960	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
 961	cfhsi->dev = dev;
 962
 963	/* Assign the driver to this HSI device. */
 964	dev->drv = &cfhsi->drv;
 965
 966	/*
 967	 * Allocate a TX buffer with the size of an HSI packet descriptor
 968	 * and the necessary room for CAIF payload frames.
 969	 */
 970	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
 971	if (!cfhsi->tx_buf) {
 972		dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
 973			__func__);
 974		res = -ENODEV;
 975		goto err_alloc_tx;
 976	}
 977
 978	/*
 979	 * Allocate an RX buffer with the size of two HSI packet descriptors and
 980	 * the necessary room for CAIF payload frames.
 981	 */
 982	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
 983	if (!cfhsi->rx_buf) {
 984		dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
 985			__func__);
 986		res = -ENODEV;
 987		goto err_alloc_rx;
 988	}
 989
 990	/* Initialize receive variables. */
 991	cfhsi->rx_ptr = cfhsi->rx_buf;
 992	cfhsi->rx_len = CFHSI_DESC_SZ;
 993
 994	/* Initialize spin locks. */
 995	spin_lock_init(&cfhsi->lock);
 996
 997	/* Set up the driver. */
 998	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
 999	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
1000
1001	/* Initialize the work queues. */
1002	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1003	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
1004	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
1005	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
1006
1007	/* Clear all bit fields. */
1008	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1009	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1010	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1011	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1012	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
1013
1014	/* Create work thread. */
1015	cfhsi->wq = create_singlethread_workqueue(pdev->name);
1016	if (!cfhsi->wq) {
1017		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
1018			__func__);
1019		res = -ENODEV;
1020		goto err_create_wq;
1021	}
1022
1023	/* Initialize wait queues. */
1024	init_waitqueue_head(&cfhsi->wake_up_wait);
1025	init_waitqueue_head(&cfhsi->wake_down_wait);
1026	init_waitqueue_head(&cfhsi->flush_fifo_wait);
1027
1028	/* Setup the inactivity timer. */
1029	init_timer(&cfhsi->timer);
1030	cfhsi->timer.data = (unsigned long)cfhsi;
1031	cfhsi->timer.function = cfhsi_inactivity_tout;
1032
1033	/* Add CAIF HSI device to list. */
1034	spin_lock(&cfhsi_list_lock);
1035	list_add_tail(&cfhsi->list, &cfhsi_list);
1036	spin_unlock(&cfhsi_list_lock);
1037
1038	/* Activate HSI interface. */
1039	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
1040	if (res) {
1041		dev_err(&cfhsi->ndev->dev,
1042			"%s: can't activate HSI interface: %d.\n",
1043			__func__, res);
1044		goto err_activate;
1045	}
1046
1047	/* Flush FIFO */
1048	res = cfhsi_flush_fifo(cfhsi);
1049	if (res) {
1050		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
1051			__func__, res);
1052		goto err_net_reg;
1053	}
1054
1055	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
1056	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
1057
1058	/* Register network device. */
1059	res = register_netdev(ndev);
1060	if (res) {
1061		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1062			__func__, res);
1063		goto err_net_reg;
1064	}
1065
1066	netif_stop_queue(ndev);
1067
1068	return res;
1069
1070 err_net_reg:
1071	cfhsi->dev->cfhsi_down(cfhsi->dev);
1072 err_activate:
1073	destroy_workqueue(cfhsi->wq);
1074 err_create_wq:
1075	kfree(cfhsi->rx_buf);
1076 err_alloc_rx:
1077	kfree(cfhsi->tx_buf);
1078 err_alloc_tx:
1079	free_netdev(ndev);
1080
1081	return res;
1082}
1083
1084static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
1085{
1086	u8 *tx_buf, *rx_buf;
1087
1088	/* Stop TXing */
1089	netif_tx_stop_all_queues(cfhsi->ndev);
1090
1091	/* Going to shut down the driver. */
1092	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1093
1094	if (remove_platform_dev) {
1095		/* Flush workqueue */
1096		flush_workqueue(cfhsi->wq);
1097
1098		/* Notify device. */
1099		platform_device_unregister(cfhsi->pdev);
1100	}
1101
1102	/* Flush workqueue */
1103	flush_workqueue(cfhsi->wq);
1104
1105	/* Delete timer if pending */
1106#ifdef CONFIG_SMP
1107	del_timer_sync(&cfhsi->timer);
1108#else
1109	del_timer(&cfhsi->timer);
1110#endif /* CONFIG_SMP */
1111
1112	/* Cancel pending RX request (if any) */
1113	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
1114
1115	/* Flush again and destroy workqueue */
1116	destroy_workqueue(cfhsi->wq);
1117
1118	/* Store buffers; they will be freed later. */
1119	tx_buf = cfhsi->tx_buf;
1120	rx_buf = cfhsi->rx_buf;
1121
1122	/* Flush transmit queues. */
1123	cfhsi_abort_tx(cfhsi);
1124
1125	/* Deactivate interface */
1126	cfhsi->dev->cfhsi_down(cfhsi->dev);
1127
1128	/* Finally unregister the network device. */
1129	unregister_netdev(cfhsi->ndev);
1130
1131	/* Free buffers. */
1132	kfree(tx_buf);
1133	kfree(rx_buf);
1134}
1135
1136int cfhsi_remove(struct platform_device *pdev)
1137{
1138	struct list_head *list_node;
1139	struct list_head *n;
1140	struct cfhsi *cfhsi = NULL;
1141	struct cfhsi_dev *dev;
1142
1143	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1144	spin_lock(&cfhsi_list_lock);
1145	list_for_each_safe(list_node, n, &cfhsi_list) {
1146		cfhsi = list_entry(list_node, struct cfhsi, list);
1147		/* Find the corresponding device. */
1148		if (cfhsi->dev == dev) {
1149			/* Remove from list. */
1150			list_del(list_node);
1151			spin_unlock(&cfhsi_list_lock);
1152
1153			/* Shutdown driver. */
1154			cfhsi_shutdown(cfhsi, false);
1155
1156			return 0;
1157		}
1158	}
1159	spin_unlock(&cfhsi_list_lock);
1160	return -ENODEV;
1161}
1162
1163struct platform_driver cfhsi_plat_drv = {
1164	.probe = cfhsi_probe,
1165	.remove = cfhsi_remove,
1166	.driver = {
1167		   .name = "cfhsi",
1168		   .owner = THIS_MODULE,
1169		   },
1170};
1171
1172static void __exit cfhsi_exit_module(void)
1173{
1174	struct list_head *list_node;
1175	struct list_head *n;
1176	struct cfhsi *cfhsi = NULL;
1177
1178	spin_lock(&cfhsi_list_lock);
1179	list_for_each_safe(list_node, n, &cfhsi_list) {
1180		cfhsi = list_entry(list_node, struct cfhsi, list);
1181
1182		/* Remove from list. */
1183		list_del(list_node);
1184		spin_unlock(&cfhsi_list_lock);
1185
1186		/* Shutdown driver. */
1187		cfhsi_shutdown(cfhsi, true);
1188
1189		spin_lock(&cfhsi_list_lock);
1190	}
1191	spin_unlock(&cfhsi_list_lock);
1192
1193	/* Unregister platform driver. */
1194	platform_driver_unregister(&cfhsi_plat_drv);
1195}
1196
1197static int __init cfhsi_init_module(void)
1198{
1199	int result;
1200
1201	/* Initialize spin lock. */
1202	spin_lock_init(&cfhsi_list_lock);
1203
1204	/* Register platform driver. */
1205	result = platform_driver_register(&cfhsi_plat_drv);
1206	if (result) {
1207		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
1208			result);
1209		goto err_dev_register;
1210	}
1211
1212	return result;
1213
1214 err_dev_register:
1215	return result;
1216}
1217
1218module_init(cfhsi_init_module);
1219module_exit(cfhsi_exit_module);