v4.17
   1/*
   2 * Copyright (C) ST-Ericsson AB 2010
   3 * Author:  Daniel Martensson
   4 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@lockless.no
   5 * License terms: GNU General Public License (GPL) version 2.
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME fmt
   9
  10#include <linux/init.h>
  11#include <linux/module.h>
  12#include <linux/device.h>
 
  13#include <linux/netdevice.h>
  14#include <linux/string.h>
  15#include <linux/list.h>
  16#include <linux/interrupt.h>
  17#include <linux/delay.h>
  18#include <linux/sched.h>
  19#include <linux/if_arp.h>
  20#include <linux/timer.h>
  21#include <net/rtnetlink.h>
  22#include <linux/pkt_sched.h>
  23#include <net/caif/caif_layer.h>
  24#include <net/caif/caif_hsi.h>
  25
  26MODULE_LICENSE("GPL");
  27MODULE_AUTHOR("Daniel Martensson");
  28MODULE_DESCRIPTION("CAIF HSI driver");
  29
  30/* Returns the number of padding bytes for alignment. */
  31#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
  32				(((pow)-((x)&((pow)-1)))))
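/*
 * For example, PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0: the macro
 * yields the number of bytes needed to round x up to a multiple of pow.
 */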
  33
  34static const struct cfhsi_config  hsi_default_config = {
  35
   36	/* Inactivity timeout on HSI, in jiffies (HZ equals one second) */
  37	.inactivity_timeout = HZ,
  38
   39	/* Aggregation timeout (ms) of zero means no aggregation is done */
  40	.aggregation_timeout = 1,
  41
  42	/*
  43	 * HSI link layer flow-control thresholds.
  44	 * Threshold values for the HSI packet queue. Flow-control will be
  45	 * asserted when the number of packets exceeds q_high_mark. It will
  46	 * not be de-asserted before the number of packets drops below
  47	 * q_low_mark.
  48	 * Warning: A high threshold value might increase throughput but it
  49	 * will at the same time prevent channel prioritization and increase
  50	 * the risk of flooding the modem. The high threshold should be above
  51	 * the low.
  52	 */
  53	.q_high_mark = 100,
  54	.q_low_mark = 50,
  55
  56	/*
  57	 * HSI padding options.
   58	 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
  59	 */
  60	.head_align = 4,
  61	.tail_align = 4,
  62};
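/*
 * These defaults apply per device and can be overridden at link creation
 * or later through the IFLA_CAIF_HSI_* netlink attributes handled by
 * cfhsi_netlink_parms() below.
 */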
  63
  64#define ON 1
  65#define OFF 0
  66
  67static LIST_HEAD(cfhsi_list);
 
  68
  69static void cfhsi_inactivity_tout(struct timer_list *t)
  70{
  71	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
  72
  73	netdev_dbg(cfhsi->ndev, "%s.\n",
  74		__func__);
  75
  76	/* Schedule power down work queue. */
  77	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
  78		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
  79}
  80
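/*
 * Track the number of bytes currently queued for aggregation, including
 * head and tail padding: direction > 0 adds, direction < 0 subtracts.
 */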
  81static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
  82					   const struct sk_buff *skb,
  83					   int direction)
  84{
  85	struct caif_payload_info *info;
  86	int hpad, tpad, len;
  87
  88	info = (struct caif_payload_info *)&skb->cb;
  89	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
  90	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
  91	len = skb->len + hpad + tpad;
  92
  93	if (direction > 0)
  94		cfhsi->aggregation_len += len;
  95	else if (direction < 0)
  96		cfhsi->aggregation_len -= len;
  97}
  98
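/*
 * An aggregate may be sent right away when aggregation is disabled
 * (timeout of zero), when any queue above best-effort/background holds
 * packets, or when the BEBK queue has reached CFHSI_MAX_PKTS.
 */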
  99static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
 100{
 101	int i;
 102
 103	if (cfhsi->cfg.aggregation_timeout == 0)
 104		return true;
 105
 106	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
 107		if (cfhsi->qhead[i].qlen)
 108			return true;
 109	}
 110
 111	/* TODO: Use aggregation_len instead */
 112	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
 113		return true;
 114
 115	return false;
 116}
 117
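/* Pop the next TX skb, scanning the priority queues from highest to lowest. */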
 118static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
 119{
 120	struct sk_buff *skb;
 121	int i;
 122
 123	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
 124		skb = skb_dequeue(&cfhsi->qhead[i]);
 125		if (skb)
 126			break;
 127	}
 128
 129	return skb;
 130}
 131
 132static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
 133{
 134	int i, len = 0;
 135	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
 136		len += skb_queue_len(&cfhsi->qhead[i]);
 137	return len;
 138}
 139
 140static void cfhsi_abort_tx(struct cfhsi *cfhsi)
 141{
 142	struct sk_buff *skb;
 143
 144	for (;;) {
 145		spin_lock_bh(&cfhsi->lock);
 146		skb = cfhsi_dequeue(cfhsi);
 147		if (!skb)
 148			break;
 149
 150		cfhsi->ndev->stats.tx_errors++;
 151		cfhsi->ndev->stats.tx_dropped++;
 152		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 153		spin_unlock_bh(&cfhsi->lock);
 154		kfree_skb(skb);
 155	}
 156	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 157	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 158		mod_timer(&cfhsi->inactivity_timer,
 159			jiffies + cfhsi->cfg.inactivity_timeout);
 160	spin_unlock_bh(&cfhsi->lock);
 161}
 162
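/*
 * Drain stale data from the HSI FIFO in chunks of up to 32 bytes, waiting
 * up to five seconds per chunk for the RX-done callback to clear the
 * CFHSI_FLUSH_FIFO bit.
 */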
 163static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 164{
 165	char buffer[32]; /* Any reasonable value */
 166	size_t fifo_occupancy;
 167	int ret;
 168
 169	netdev_dbg(cfhsi->ndev, "%s.\n",
 170		__func__);
 171
 172	do {
 173		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
 174				&fifo_occupancy);
 175		if (ret) {
 176			netdev_warn(cfhsi->ndev,
 177				"%s: can't get FIFO occupancy: %d.\n",
 178				__func__, ret);
 179			break;
 180		} else if (!fifo_occupancy)
  181			/* No more data, exiting normally */
 182			break;
 183
 184		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
 185		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 186		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
 187				cfhsi->ops);
 188		if (ret) {
 189			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 190			netdev_warn(cfhsi->ndev,
 191				"%s: can't read data: %d.\n",
 192				__func__, ret);
 193			break;
 194		}
 195
 196		ret = 5 * HZ;
 197		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
 198			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
 199
 200		if (ret < 0) {
 201			netdev_warn(cfhsi->ndev,
 202				"%s: can't wait for flush complete: %d.\n",
 203				__func__, ret);
 204			break;
 205		} else if (!ret) {
 206			ret = -ETIMEDOUT;
 207			netdev_warn(cfhsi->ndev,
 208				"%s: timeout waiting for flush complete.\n",
 209				__func__);
 210			break;
 211		}
 212	} while (1);
 213
 214	return ret;
 215}
 216
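/*
 * Build one HSI transfer: a descriptor with an optional embedded CAIF
 * frame plus up to CFHSI_MAX_PKTS payload frames. Returns the total
 * transfer length in bytes, or 0 if nothing is queued.
 */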
 217static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 218{
 219	int nfrms = 0;
 220	int pld_len = 0;
 221	struct sk_buff *skb;
 222	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 223
 224	skb = cfhsi_dequeue(cfhsi);
 225	if (!skb)
 226		return 0;
 227
 228	/* Clear offset. */
 229	desc->offset = 0;
 230
 231	/* Check if we can embed a CAIF frame. */
 232	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
 233		struct caif_payload_info *info;
 234		int hpad;
 235		int tpad;
 236
 237		/* Calculate needed head alignment and tail alignment. */
 238		info = (struct caif_payload_info *)&skb->cb;
 239
 240		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
 241		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 242
 243		/* Check if frame still fits with added alignment. */
 244		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
 245			u8 *pemb = desc->emb_frm;
 246			desc->offset = CFHSI_DESC_SHORT_SZ;
 247			*pemb = (u8)(hpad - 1);
 248			pemb += hpad;
 249
 250			/* Update network statistics. */
 251			spin_lock_bh(&cfhsi->lock);
 252			cfhsi->ndev->stats.tx_packets++;
 253			cfhsi->ndev->stats.tx_bytes += skb->len;
 254			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 255			spin_unlock_bh(&cfhsi->lock);
 256
 257			/* Copy in embedded CAIF frame. */
 258			skb_copy_bits(skb, 0, pemb, skb->len);
 259
 260			/* Consume the SKB */
 261			consume_skb(skb);
 262			skb = NULL;
 263		}
 264	}
 265
 266	/* Create payload CAIF frames. */
 
 267	while (nfrms < CFHSI_MAX_PKTS) {
 268		struct caif_payload_info *info;
 269		int hpad;
 270		int tpad;
 271
 272		if (!skb)
 273			skb = cfhsi_dequeue(cfhsi);
 274
 275		if (!skb)
 276			break;
 277
 278		/* Calculate needed head alignment and tail alignment. */
 279		info = (struct caif_payload_info *)&skb->cb;
 280
 281		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
 282		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 283
 284		/* Fill in CAIF frame length in descriptor. */
 285		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
 286
 287		/* Fill head padding information. */
 288		*pfrm = (u8)(hpad - 1);
 289		pfrm += hpad;
 290
 291		/* Update network statistics. */
 292		spin_lock_bh(&cfhsi->lock);
 293		cfhsi->ndev->stats.tx_packets++;
 294		cfhsi->ndev->stats.tx_bytes += skb->len;
 295		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 296		spin_unlock_bh(&cfhsi->lock);
 297
 298		/* Copy in CAIF frame. */
 299		skb_copy_bits(skb, 0, pfrm, skb->len);
 300
 301		/* Update payload length. */
 302		pld_len += desc->cffrm_len[nfrms];
 303
 304		/* Update frame pointer. */
 305		pfrm += skb->len + tpad;
 306
 307		/* Consume the SKB */
 308		consume_skb(skb);
 309		skb = NULL;
 310
 311		/* Update number of frames. */
 312		nfrms++;
 313	}
 314
 315	/* Unused length fields should be zero-filled (according to SPEC). */
 316	while (nfrms < CFHSI_MAX_PKTS) {
 317		desc->cffrm_len[nfrms] = 0x0000;
 318		nfrms++;
 319	}
 320
 321	/* Check if we can piggy-back another descriptor. */
 322	if (cfhsi_can_send_aggregate(cfhsi))
 323		desc->header |= CFHSI_PIGGY_DESC;
 324	else
 325		desc->header &= ~CFHSI_PIGGY_DESC;
 326
 327	return CFHSI_DESC_SZ + pld_len;
 328}
 329
 330static void cfhsi_start_tx(struct cfhsi *cfhsi)
 331{
 332	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 333	int len, res;
 334
 335	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 336
 337	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 338		return;
 339
 340	do {
 341		/* Create HSI frame. */
 342		len = cfhsi_tx_frm(desc, cfhsi);
 343		if (!len) {
 344			spin_lock_bh(&cfhsi->lock);
 345			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
 346				spin_unlock_bh(&cfhsi->lock);
 347				res = -EAGAIN;
 348				continue;
 349			}
 350			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 351			/* Start inactivity timer. */
 352			mod_timer(&cfhsi->inactivity_timer,
 353				jiffies + cfhsi->cfg.inactivity_timeout);
 354			spin_unlock_bh(&cfhsi->lock);
 355			break;
 356		}
 357
 358		/* Set up new transfer. */
 359		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
 360		if (WARN_ON(res < 0))
 361			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 362				__func__, res);
 
 363	} while (res < 0);
 364}
 365
 366static void cfhsi_tx_done(struct cfhsi *cfhsi)
 367{
 368	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 369
 370	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 371		return;
 372
 373	/*
 374	 * Send flow on if flow off has been previously signalled
 375	 * and number of packets is below low water mark.
 376	 */
 377	spin_lock_bh(&cfhsi->lock);
 378	if (cfhsi->flow_off_sent &&
 379			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
 380			cfhsi->cfdev.flowctrl) {
 381
 382		cfhsi->flow_off_sent = 0;
 383		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
 384	}
 385
 386	if (cfhsi_can_send_aggregate(cfhsi)) {
 387		spin_unlock_bh(&cfhsi->lock);
 388		cfhsi_start_tx(cfhsi);
 389	} else {
 390		mod_timer(&cfhsi->aggregation_timer,
 391			jiffies + cfhsi->cfg.aggregation_timeout);
 392		spin_unlock_bh(&cfhsi->lock);
 393	}
 394
 395	return;
 396}
 397
 398static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
 399{
 400	struct cfhsi *cfhsi;
 401
 402	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 403	netdev_dbg(cfhsi->ndev, "%s.\n",
 404		__func__);
 405
 406	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 407		return;
 408	cfhsi_tx_done(cfhsi);
 
 409}
 410
 411static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 412{
 413	int xfer_sz = 0;
 414	int nfrms = 0;
 415	u16 *plen = NULL;
 416	u8 *pfrm = NULL;
 417
 418	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 419			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 420		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
 421			__func__);
 422		return -EPROTO;
 423	}
 424
 425	/* Check for embedded CAIF frame. */
 426	if (desc->offset) {
 427		struct sk_buff *skb;
 428		int len = 0;
 
 429		pfrm = ((u8 *)desc) + desc->offset;
 430
 431		/* Remove offset padding. */
 432		pfrm += *pfrm + 1;
 433
 434		/* Read length of CAIF frame (little endian). */
 435		len = *pfrm;
 436		len |= ((*(pfrm+1)) << 8) & 0xFF00;
 437		len += 2;	/* Add FCS fields. */
 438
 439		/* Sanity check length of CAIF frame. */
 440		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
 441			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
 442				__func__);
 443			return -EPROTO;
 444		}
 445
 446		/* Allocate SKB (OK even in IRQ context). */
 447		skb = alloc_skb(len + 1, GFP_ATOMIC);
 448		if (!skb) {
 449			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
 450				__func__);
 451			return -ENOMEM;
 452		}
 453		caif_assert(skb != NULL);
 454
 455		skb_put_data(skb, pfrm, len);
 
 456
 457		skb->protocol = htons(ETH_P_CAIF);
 458		skb_reset_mac_header(skb);
 459		skb->dev = cfhsi->ndev;
 460
 461		/*
 462		 * We are in a callback handler and
 463		 * unfortunately we don't know what context we're
 464		 * running in.
 465		 */
 466		if (in_interrupt())
 467			netif_rx(skb);
 468		else
 469			netif_rx_ni(skb);
 470
 471		/* Update network statistics. */
 472		cfhsi->ndev->stats.rx_packets++;
 473		cfhsi->ndev->stats.rx_bytes += len;
 474	}
 475
 
 476	/* Calculate transfer length. */
 477	plen = desc->cffrm_len;
 478	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 479		xfer_sz += *plen;
 480		plen++;
 481		nfrms++;
 482	}
 483
 484	/* Check for piggy-backed descriptor. */
 485	if (desc->header & CFHSI_PIGGY_DESC)
 486		xfer_sz += CFHSI_DESC_SZ;
 487
 488	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
 489		netdev_err(cfhsi->ndev,
 490				"%s: Invalid payload len: %d, ignored.\n",
 491			__func__, xfer_sz);
 492		return -EPROTO;
 493	}
 494	return xfer_sz;
 495}
 496
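/*
 * Compute the payload length announced by a descriptor without touching
 * the payload itself; returns -EPROTO if the descriptor looks corrupt.
 */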
 497static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
 498{
 499	int xfer_sz = 0;
 500	int nfrms = 0;
 501	u16 *plen;
 502
 503	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 504			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 505
 506		pr_err("Invalid descriptor. %x %x\n", desc->header,
 507				desc->offset);
 508		return -EPROTO;
 509	}
 510
 511	/* Calculate transfer length. */
 512	plen = desc->cffrm_len;
 513	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 514		xfer_sz += *plen;
 515		plen++;
 516		nfrms++;
 517	}
 518
 519	if (xfer_sz % 4) {
 520		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
 521		return -EPROTO;
 522	}
 523	return xfer_sz;
 524}
 525
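/*
 * Parse the payload area: skip frames already delivered in a previous
 * pass (rx_state.nfrms), then hand each remaining CAIF frame to the
 * network stack. Returns the number of payload bytes consumed.
 */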
 526static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 527{
 528	int rx_sz = 0;
 529	int nfrms = 0;
 530	u16 *plen = NULL;
 531	u8 *pfrm = NULL;
 532
 533	/* Sanity check header and offset. */
 534	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
 535			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
 536		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
 537			__func__);
 538		return -EPROTO;
 539	}
 540
 541	/* Set frame pointer to start of payload. */
 542	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 543	plen = desc->cffrm_len;
 544
 545	/* Skip already processed frames. */
 546	while (nfrms < cfhsi->rx_state.nfrms) {
 547		pfrm += *plen;
 548		rx_sz += *plen;
 549		plen++;
 550		nfrms++;
 551	}
 552
 553	/* Parse payload. */
 554	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 555		struct sk_buff *skb;
 
 556		u8 *pcffrm = NULL;
 557		int len;
 558
 559		/* CAIF frame starts after head padding. */
 560		pcffrm = pfrm + *pfrm + 1;
 561
 562		/* Read length of CAIF frame (little endian). */
 563		len = *pcffrm;
 564		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
 565		len += 2;	/* Add FCS fields. */
 566
 567		/* Sanity check length of CAIF frames. */
 568		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
 569			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
 570				__func__);
 571			return -EPROTO;
 572		}
 573
 574		/* Allocate SKB (OK even in IRQ context). */
 575		skb = alloc_skb(len + 1, GFP_ATOMIC);
 576		if (!skb) {
 577			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
 578				__func__);
 579			cfhsi->rx_state.nfrms = nfrms;
 580			return -ENOMEM;
 581		}
 582		caif_assert(skb != NULL);
 583
 584		skb_put_data(skb, pcffrm, len);
 
 585
 586		skb->protocol = htons(ETH_P_CAIF);
 587		skb_reset_mac_header(skb);
 588		skb->dev = cfhsi->ndev;
 589
 590		/*
 591		 * We're called in callback from HSI
 592		 * and don't know the context we're running in.
 593		 */
 594		if (in_interrupt())
 595			netif_rx(skb);
 596		else
 597			netif_rx_ni(skb);
 598
 599		/* Update network statistics. */
 600		cfhsi->ndev->stats.rx_packets++;
 601		cfhsi->ndev->stats.rx_bytes += len;
 602
 
 603		pfrm += *plen;
 604		rx_sz += *plen;
 605		plen++;
 606		nfrms++;
 607	}
 608
 609	return rx_sz;
 610}
 611
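/*
 * RX completion: work out how much the next transfer must fetch, start
 * that read (into the flip buffer once the current buffer is fully
 * parsed), then extract the frames that just arrived and swap the RX
 * buffers if needed.
 */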
 612static void cfhsi_rx_done(struct cfhsi *cfhsi)
 613{
 614	int res;
 615	int desc_pld_len = 0, rx_len, rx_state;
 
 616	struct cfhsi_desc *desc = NULL;
 617	u8 *rx_ptr, *rx_buf;
 618	struct cfhsi_desc *piggy_desc = NULL;
 619
 
 620	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
 621
 622	netdev_dbg(cfhsi->ndev, "%s\n", __func__);
 
 623
 624	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 625		return;
 626
 627	/* Update inactivity timer if pending. */
 628	spin_lock_bh(&cfhsi->lock);
 629	mod_timer_pending(&cfhsi->inactivity_timer,
 630			jiffies + cfhsi->cfg.inactivity_timeout);
 631	spin_unlock_bh(&cfhsi->lock);
 632
 633	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
 634		desc_pld_len = cfhsi_rx_desc_len(desc);
 635
 636		if (desc_pld_len < 0)
 637			goto out_of_sync;
 638
 639		rx_buf = cfhsi->rx_buf;
 640		rx_len = desc_pld_len;
 641		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
 642			rx_len += CFHSI_DESC_SZ;
 643		if (desc_pld_len == 0)
 644			rx_buf = cfhsi->rx_flip_buf;
 645	} else {
 646		rx_buf = cfhsi->rx_flip_buf;
 647
 648		rx_len = CFHSI_DESC_SZ;
 649		if (cfhsi->rx_state.pld_len > 0 &&
 650				(desc->header & CFHSI_PIGGY_DESC)) {
 651
 652			piggy_desc = (struct cfhsi_desc *)
 653				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
 654						cfhsi->rx_state.pld_len);
 655
 656			cfhsi->rx_state.piggy_desc = true;
 657
 658			/* Extract payload len from piggy-backed descriptor. */
 659			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
 660			if (desc_pld_len < 0)
 661				goto out_of_sync;
 662
 663			if (desc_pld_len > 0) {
 664				rx_len = desc_pld_len;
 665				if (piggy_desc->header & CFHSI_PIGGY_DESC)
 666					rx_len += CFHSI_DESC_SZ;
 667			}
 668
 669			/*
 670			 * Copy needed information from the piggy-backed
 671			 * descriptor to the descriptor in the start.
 672			 */
 673			memcpy(rx_buf, (u8 *)piggy_desc,
 674					CFHSI_DESC_SHORT_SZ);
 675		}
 676	}
 677
 678	if (desc_pld_len) {
 679		rx_state = CFHSI_RX_STATE_PAYLOAD;
 680		rx_ptr = rx_buf + CFHSI_DESC_SZ;
 
 681	} else {
 682		rx_state = CFHSI_RX_STATE_DESC;
 683		rx_ptr = rx_buf;
 684		rx_len = CFHSI_DESC_SZ;
 685	}
 
 686
 687	/* Initiate next read */
 688	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
 689		/* Set up new transfer. */
 690		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
 691				__func__);
 692
 693		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
 694				cfhsi->ops);
 695		if (WARN_ON(res < 0)) {
 696			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
 697				__func__, res);
 698			cfhsi->ndev->stats.rx_errors++;
 699			cfhsi->ndev->stats.rx_dropped++;
 700		}
 701	}
 702
 703	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
 704		/* Extract payload from descriptor */
 705		if (cfhsi_rx_desc(desc, cfhsi) < 0)
 706			goto out_of_sync;
 707	} else {
 708		/* Extract payload */
 709		if (cfhsi_rx_pld(desc, cfhsi) < 0)
 710			goto out_of_sync;
 711		if (piggy_desc) {
 712			/* Extract any payload in piggyback descriptor. */
 713			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
 714				goto out_of_sync;
 715			/* Mark no embedded frame after extracting it */
 716			piggy_desc->offset = 0;
 717		}
 718	}
 719
 720	/* Update state info */
 721	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
 722	cfhsi->rx_state.state = rx_state;
 723	cfhsi->rx_ptr = rx_ptr;
 724	cfhsi->rx_len = rx_len;
 725	cfhsi->rx_state.pld_len = desc_pld_len;
 726	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
 727
 728	if (rx_buf != cfhsi->rx_buf)
 729		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
 730	return;
 731
 732out_of_sync:
 733	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
 734	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
 735			cfhsi->rx_buf, CFHSI_DESC_SZ);
 736	schedule_work(&cfhsi->out_of_sync_work);
 737}
 738
 739static void cfhsi_rx_slowpath(struct timer_list *t)
 740{
 741	struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
 742
 743	netdev_dbg(cfhsi->ndev, "%s.\n",
 744		__func__);
 745
 746	cfhsi_rx_done(cfhsi);
 747}
 748
 749static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
 750{
 751	struct cfhsi *cfhsi;
 752
 753	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 754	netdev_dbg(cfhsi->ndev, "%s.\n",
 755		__func__);
 756
 757	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 758		return;
 759
 760	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
 761		wake_up_interruptible(&cfhsi->flush_fifo_wait);
 762	else
 763		cfhsi_rx_done(cfhsi);
 764}
 765
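/*
 * Wake-up worker: assert the wake line, wait for the peer's acknowledge
 * (recovering from a missed interrupt by polling the peer wake state),
 * then restart RX and, if packets are queued, kick off TX.
 */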
 766static void cfhsi_wake_up(struct work_struct *work)
 767{
 768	struct cfhsi *cfhsi = NULL;
 769	int res;
 770	int len;
 771	long ret;
 772
 773	cfhsi = container_of(work, struct cfhsi, wake_up_work);
 774
 775	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 776		return;
 777
 778	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
  779		/* This happens when wakeup is requested by
 780		 * both ends at the same time. */
 781		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 782		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 783		return;
 784	}
 785
 786	/* Activate wake line. */
 787	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
 788
 789	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
 790		__func__);
 791
 792	/* Wait for acknowledge. */
 793	ret = CFHSI_WAKE_TOUT;
 794	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
 795					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
 796							&cfhsi->bits), ret);
 797	if (unlikely(ret < 0)) {
 798		/* Interrupted by signal. */
 799		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
 800			__func__, ret);
 801
 802		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 803		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
 804		return;
 805	} else if (!ret) {
 806		bool ca_wake = false;
 807		size_t fifo_occupancy = 0;
 808
 809		/* Wakeup timeout */
 810		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
 811			__func__);
 812
  813		/* Check the FIFO to see if the modem has sent something. */
 814		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
 815					&fifo_occupancy));
 816
 817		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
 818				__func__, (unsigned) fifo_occupancy);
 819
  820		/* Check if we missed the interrupt. */
 821		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
 822							&ca_wake));
 823
 824		if (ca_wake) {
 825			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
 826				__func__);
 827
 828			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
 829			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 830
 831			/* Continue execution. */
 832			goto wake_ack;
 833		}
 834
 835		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 836		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
 837		return;
 838	}
 839wake_ack:
 840	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
 841		__func__);
 842
  843	/* Mark the link awake and clear the pending wake-up request. */
 844	set_bit(CFHSI_AWAKE, &cfhsi->bits);
 845	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 846
 847	/* Resume read operation. */
 848	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
 849	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
 850
 851	if (WARN_ON(res < 0))
 852		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
 853
  854	/* Clear power up acknowledgment. */
 855	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 856
 857	spin_lock_bh(&cfhsi->lock);
 858
  859	/* Resume transmit if queues are not empty, otherwise just rearm the inactivity timer. */
 860	if (!cfhsi_tx_queue_len(cfhsi)) {
 861		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
 862			__func__);
 863		/* Start inactivity timer. */
 864		mod_timer(&cfhsi->inactivity_timer,
 865				jiffies + cfhsi->cfg.inactivity_timeout);
 866		spin_unlock_bh(&cfhsi->lock);
 867		return;
 868	}
 869
 870	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
 871		__func__);
 872
 873	spin_unlock_bh(&cfhsi->lock);
 874
 875	/* Create HSI frame. */
 876	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
 877
 878	if (likely(len > 0)) {
 879		/* Set up new transfer. */
 880		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
 881		if (WARN_ON(res < 0)) {
 882			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 883				__func__, res);
 884			cfhsi_abort_tx(cfhsi);
 885		}
 886	} else {
 887		netdev_err(cfhsi->ndev,
 888				"%s: Failed to create HSI frame: %d.\n",
 889				__func__, len);
 890	}
 
 891}
 892
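/*
 * Power-down worker: deassert the wake line, wait for the peer's
 * acknowledge, let the FIFO drain and finally cancel any outstanding
 * RX request.
 */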
 893static void cfhsi_wake_down(struct work_struct *work)
 894{
 895	long ret;
 896	struct cfhsi *cfhsi = NULL;
 897	size_t fifo_occupancy = 0;
 898	int retry = CFHSI_WAKE_TOUT;
 899
 900	cfhsi = container_of(work, struct cfhsi, wake_down_work);
 901	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 
 902
 903	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 904		return;
 905
 906	/* Deactivate wake line. */
 907	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
 908
 909	/* Wait for acknowledge. */
 910	ret = CFHSI_WAKE_TOUT;
 911	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
 912					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
 913							&cfhsi->bits), ret);
 
 914	if (ret < 0) {
 915		/* Interrupted by signal. */
 916		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
 917			__func__, ret);
 918		return;
 919	} else if (!ret) {
 920		bool ca_wake = true;
 921
 922		/* Timeout */
 923		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
 924
  925		/* Check if we missed the interrupt. */
 926		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
 927							&ca_wake));
 928		if (!ca_wake)
 929			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
 930				__func__);
 931	}
 932
 933	/* Check FIFO occupancy. */
 934	while (retry) {
 935		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
 936							&fifo_occupancy));
 937
 938		if (!fifo_occupancy)
 939			break;
 940
 941		set_current_state(TASK_INTERRUPTIBLE);
 942		schedule_timeout(1);
 943		retry--;
 944	}
 945
 946	if (!retry)
 947		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
 948
 949	/* Clear AWAKE condition. */
 950	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 951
 952	/* Cancel pending RX requests. */
 953	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
 954}
 955
 956static void cfhsi_out_of_sync(struct work_struct *work)
 957{
 958	struct cfhsi *cfhsi = NULL;
 959
 960	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
 961
 962	rtnl_lock();
 963	dev_close(cfhsi->ndev);
 964	rtnl_unlock();
 
 965}
 966
 967static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
 968{
 969	struct cfhsi *cfhsi = NULL;
 970
 971	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 972	netdev_dbg(cfhsi->ndev, "%s.\n",
 973		__func__);
 974
 975	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 976	wake_up_interruptible(&cfhsi->wake_up_wait);
 977
 978	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 979		return;
 980
 981	/* Schedule wake up work queue if the peer initiates. */
 982	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 983		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 984}
 985
 986static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
 987{
 988	struct cfhsi *cfhsi = NULL;
 989
 990	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
 991	netdev_dbg(cfhsi->ndev, "%s.\n",
 992		__func__);
 993
 994	/* Initiating low power is only permitted by the host (us). */
 995	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 996	wake_up_interruptible(&cfhsi->wake_down_wait);
 997}
 998
 999static void cfhsi_aggregation_tout(struct timer_list *t)
1000{
1001	struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
1002
1003	netdev_dbg(cfhsi->ndev, "%s.\n",
1004		__func__);
1005
1006	cfhsi_start_tx(cfhsi);
1007}
1008
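/*
 * ndo_start_xmit: map skb->priority (TC_PRIO_*) onto the four CFHSI
 * queues, apply high-water-mark flow control, and either start a
 * transfer directly (the link is awake and the inactivity timer was
 * still running) or schedule the wake-up worker.
 */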
1009static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1010{
1011	struct cfhsi *cfhsi = NULL;
1012	int start_xfer = 0;
1013	int timer_active;
1014	int prio;
1015
1016	if (!dev)
1017		return -EINVAL;
1018
1019	cfhsi = netdev_priv(dev);
1020
1021	switch (skb->priority) {
1022	case TC_PRIO_BESTEFFORT:
1023	case TC_PRIO_FILLER:
1024	case TC_PRIO_BULK:
1025		prio = CFHSI_PRIO_BEBK;
1026		break;
1027	case TC_PRIO_INTERACTIVE_BULK:
1028		prio = CFHSI_PRIO_VI;
1029		break;
1030	case TC_PRIO_INTERACTIVE:
1031		prio = CFHSI_PRIO_VO;
1032		break;
1033	case TC_PRIO_CONTROL:
1034	default:
1035		prio = CFHSI_PRIO_CTL;
1036		break;
1037	}
1038
1039	spin_lock_bh(&cfhsi->lock);
1040
1041	/* Update aggregation statistics  */
1042	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1043
1044	/* Queue the SKB */
1045	skb_queue_tail(&cfhsi->qhead[prio], skb);
1046
1047	/* Sanity check; xmit should not be called after unregister_netdev */
1048	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
1049		spin_unlock_bh(&cfhsi->lock);
1050		cfhsi_abort_tx(cfhsi);
1051		return -EINVAL;
1052	}
1053
1054	/* Send flow off if number of packets is above high water mark. */
1055	if (!cfhsi->flow_off_sent &&
1056		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
1057		cfhsi->cfdev.flowctrl) {
1058		cfhsi->flow_off_sent = 1;
1059		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
1060	}
1061
1062	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
1063		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
1064		start_xfer = 1;
1065	}
1066
1067	if (!start_xfer) {
1068		/* Send aggregate if it is possible */
1069		bool aggregate_ready =
1070			cfhsi_can_send_aggregate(cfhsi) &&
1071			del_timer(&cfhsi->aggregation_timer) > 0;
1072		spin_unlock_bh(&cfhsi->lock);
1073		if (aggregate_ready)
1074			cfhsi_start_tx(cfhsi);
1075		return 0;
1076	}
1077
1078	/* Delete inactivity timer if started. */
1079	timer_active = del_timer_sync(&cfhsi->inactivity_timer);
1080
1081	spin_unlock_bh(&cfhsi->lock);
1082
1083	if (timer_active) {
1084		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
1085		int len;
1086		int res;
1087
1088		/* Create HSI frame. */
1089		len = cfhsi_tx_frm(desc, cfhsi);
1090		WARN_ON(!len);
1091
1092		/* Set up new transfer. */
1093		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
1094		if (WARN_ON(res < 0)) {
1095			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
1096				__func__, res);
1097			cfhsi_abort_tx(cfhsi);
1098		}
1099	} else {
 1100		/* Schedule wake up work queue if we initiate. */
1101		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
1102			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1103	}
1104
1105	return 0;
1106}
1107
1108static const struct net_device_ops cfhsi_netdevops;
1109
1110static void cfhsi_setup(struct net_device *dev)
1111{
1112	int i;
1113	struct cfhsi *cfhsi = netdev_priv(dev);
1114	dev->features = 0;
 
1115	dev->type = ARPHRD_CAIF;
1116	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1117	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1118	dev->priv_flags |= IFF_NO_QUEUE;
1119	dev->needs_free_netdev = true;
1120	dev->netdev_ops = &cfhsi_netdevops;
1121	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1122		skb_queue_head_init(&cfhsi->qhead[i]);
1123	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1124	cfhsi->cfdev.use_frag = false;
1125	cfhsi->cfdev.use_stx = false;
1126	cfhsi->cfdev.use_fcs = false;
1127	cfhsi->ndev = dev;
1128	cfhsi->cfg = hsi_default_config;
1129}
1130
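/*
 * ndo_open: allocate the TX, RX and RX flip buffers, register the
 * callbacks, timers and ordered workqueue, bring the HSI interface up
 * and flush its FIFO.
 */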
1131static int cfhsi_open(struct net_device *ndev)
1132{
1133	struct cfhsi *cfhsi = netdev_priv(ndev);
1134	int res;
1135
1136	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1137
 1138	/* Initialize state variables. */
1139	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1140	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
1141
1142	/* Set flow info */
1143	cfhsi->flow_off_sent = 0;
1144
1145	/*
 1146	 * Allocate a TX buffer with the size of an HSI packet descriptor
1147	 * and the necessary room for CAIF payload frames.
1148	 */
1149	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
1150	if (!cfhsi->tx_buf) {
1151		res = -ENODEV;
1152		goto err_alloc_tx;
1153	}
1154
1155	/*
 1156	 * Allocate an RX buffer with the size of two HSI packet descriptors and
1157	 * the necessary room for CAIF payload frames.
1158	 */
1159	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1160	if (!cfhsi->rx_buf) {
1161		res = -ENODEV;
1162		goto err_alloc_rx;
1163	}
1164
1165	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1166	if (!cfhsi->rx_flip_buf) {
1167		res = -ENODEV;
1168		goto err_alloc_rx_flip;
1169	}
1170
1171	/* Initialize aggregation timeout */
1172	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
1173
 1174	/* Initialize receive variables. */
1175	cfhsi->rx_ptr = cfhsi->rx_buf;
1176	cfhsi->rx_len = CFHSI_DESC_SZ;
1177
1178	/* Initialize spin locks. */
1179	spin_lock_init(&cfhsi->lock);
1180
1181	/* Set up the driver. */
1182	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
1183	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
1184	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
1185	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
1186
1187	/* Initialize the work queues. */
1188	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1189	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
1190	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
 
1191
1192	/* Clear all bit fields. */
1193	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1194	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1195	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1196	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 
1197
1198	/* Create work thread. */
1199	cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
1200	if (!cfhsi->wq) {
1201		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
1202			__func__);
1203		res = -ENODEV;
1204		goto err_create_wq;
1205	}
1206
1207	/* Initialize wait queues. */
1208	init_waitqueue_head(&cfhsi->wake_up_wait);
1209	init_waitqueue_head(&cfhsi->wake_down_wait);
1210	init_waitqueue_head(&cfhsi->flush_fifo_wait);
1211
1212	/* Setup the inactivity timer. */
1213	timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
1214	/* Setup the slowpath RX timer. */
1215	timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
1216	/* Setup the aggregation timer. */
1217	timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
1218
1219	/* Activate HSI interface. */
1220	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
1221	if (res) {
1222		netdev_err(cfhsi->ndev,
1223			"%s: can't activate HSI interface: %d.\n",
1224			__func__, res);
1225		goto err_activate;
1226	}
1227
1228	/* Flush FIFO */
1229	res = cfhsi_flush_fifo(cfhsi);
1230	if (res) {
1231		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
1232			__func__, res);
1233		goto err_net_reg;
1234	}
1235	return res;
1236
1237 err_net_reg:
1238	cfhsi->ops->cfhsi_down(cfhsi->ops);
1239 err_activate:
1240	destroy_workqueue(cfhsi->wq);
1241 err_create_wq:
1242	kfree(cfhsi->rx_flip_buf);
1243 err_alloc_rx_flip:
1244	kfree(cfhsi->rx_buf);
1245 err_alloc_rx:
1246	kfree(cfhsi->tx_buf);
1247 err_alloc_tx:
1248	return res;
1249}
1250
1251static int cfhsi_close(struct net_device *ndev)
1252{
1253	struct cfhsi *cfhsi = netdev_priv(ndev);
1254	u8 *tx_buf, *rx_buf, *flip_buf;
1255
 1256	/* Going to shut down the driver. */
1257	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1258
1259	/* Delete timers if pending */
1260	del_timer_sync(&cfhsi->inactivity_timer);
1261	del_timer_sync(&cfhsi->rx_slowpath_timer);
1262	del_timer_sync(&cfhsi->aggregation_timer);
1263
1264	/* Cancel pending RX request (if any) */
1265	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
1266
1267	/* Destroy workqueue */
1268	destroy_workqueue(cfhsi->wq);
1269
 1270	/* Store buffers: they will be freed later. */
1271	tx_buf = cfhsi->tx_buf;
1272	rx_buf = cfhsi->rx_buf;
1273	flip_buf = cfhsi->rx_flip_buf;
1274	/* Flush transmit queues. */
1275	cfhsi_abort_tx(cfhsi);
1276
1277	/* Deactivate interface */
1278	cfhsi->ops->cfhsi_down(cfhsi->ops);
1279
1280	/* Free buffers. */
1281	kfree(tx_buf);
1282	kfree(rx_buf);
1283	kfree(flip_buf);
1284	return 0;
1285}
1286
1287static void cfhsi_uninit(struct net_device *dev)
1288{
1289	struct cfhsi *cfhsi = netdev_priv(dev);
1290	ASSERT_RTNL();
1291	symbol_put(cfhsi_get_device);
1292	list_del(&cfhsi->list);
1293}
1294
1295static const struct net_device_ops cfhsi_netdevops = {
1296	.ndo_uninit = cfhsi_uninit,
1297	.ndo_open = cfhsi_open,
1298	.ndo_stop = cfhsi_close,
1299	.ndo_start_xmit = cfhsi_xmit
1300};
1301
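/*
 * Apply the IFLA_CAIF_HSI_* attributes (all u32) from a newlink or
 * changelink request on top of the current configuration; attributes
 * that are absent leave the existing values untouched.
 */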
1302static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
1303{
1304	int i;
1305
1306	if (!data) {
1307		pr_debug("no params data found\n");
1308		return;
1309	}
1310
1311	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
1312	/*
1313	 * Inactivity timeout in millisecs. Lowest possible value is 1,
1314	 * and highest possible is NEXT_TIMER_MAX_DELTA.
1315	 */
1316	if (data[i]) {
1317		u32 inactivity_timeout = nla_get_u32(data[i]);
1318		/* Pre-calculate inactivity timeout. */
1319		cfhsi->cfg.inactivity_timeout =	inactivity_timeout * HZ / 1000;
1320		if (cfhsi->cfg.inactivity_timeout == 0)
1321			cfhsi->cfg.inactivity_timeout = 1;
1322		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1323			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1324	}
1325
1326	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
1327	if (data[i])
1328		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
1329
1330	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
1331	if (data[i])
1332		cfhsi->cfg.head_align = nla_get_u32(data[i]);
1333
1334	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
1335	if (data[i])
1336		cfhsi->cfg.tail_align = nla_get_u32(data[i]);
1337
1338	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
1339	if (data[i])
1340		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
1341
1342	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
1343	if (data[i])
1344		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
1345}
1346
1347static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
1348			       struct nlattr *data[],
1349			       struct netlink_ext_ack *extack)
1350{
1351	cfhsi_netlink_parms(data, netdev_priv(dev));
1352	netdev_state_change(dev);
1353	return 0;
1354}
1355
1356static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
1357	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
1358	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
1359	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
1360	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
1361	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
1362	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
1363};
1364
1365static size_t caif_hsi_get_size(const struct net_device *dev)
1366{
1367	int i;
1368	size_t s = 0;
1369	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
1370		s += nla_total_size(caif_hsi_policy[i].len);
1371	return s;
1372}
1373
1374static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
1375{
1376	struct cfhsi *cfhsi = netdev_priv(dev);
1377
1378	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
1379			cfhsi->cfg.inactivity_timeout) ||
1380	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
1381			cfhsi->cfg.aggregation_timeout) ||
1382	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
1383			cfhsi->cfg.head_align) ||
1384	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
1385			cfhsi->cfg.tail_align) ||
1386	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
1387			cfhsi->cfg.q_high_mark) ||
1388	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
1389			cfhsi->cfg.q_low_mark))
1390		return -EMSGSIZE;
1391
1392	return 0;
1393}
1394
1395static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1396			    struct nlattr *tb[], struct nlattr *data[],
1397			    struct netlink_ext_ack *extack)
1398{
1399	struct cfhsi *cfhsi = NULL;
1400	struct cfhsi_ops *(*get_ops)(void);
1401
1402	ASSERT_RTNL();
1403
1404	cfhsi = netdev_priv(dev);
1405	cfhsi_netlink_parms(data, cfhsi);
 
1406
1407	get_ops = symbol_get(cfhsi_get_ops);
1408	if (!get_ops) {
1409		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1410		return -ENODEV;
1411	}
1412
1413	/* Assign the HSI device. */
1414	cfhsi->ops = (*get_ops)();
1415	if (!cfhsi->ops) {
1416		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1417		goto err;
1418	}
1419
1420	/* Assign the driver to this HSI device. */
1421	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
1422	if (register_netdevice(dev)) {
1423		pr_warn("%s: caif_hsi device registration failed\n", __func__);
1424		goto err;
1425	}
1426	/* Add CAIF HSI device to list. */
1427	list_add_tail(&cfhsi->list, &cfhsi_list);
1428
1429	return 0;
1430err:
1431	symbol_put(cfhsi_get_ops);
1432	return -ENODEV;
1433}
1434
1435static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
1436	.kind		= "cfhsi",
1437	.priv_size	= sizeof(struct cfhsi),
1438	.setup		= cfhsi_setup,
1439	.maxtype	= __IFLA_CAIF_HSI_MAX,
1440	.policy	= caif_hsi_policy,
1441	.newlink	= caif_hsi_newlink,
1442	.changelink	= caif_hsi_changelink,
1443	.get_size	= caif_hsi_get_size,
1444	.fill_info	= caif_hsi_fill_info,
1445};
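/*
 * Usage sketch (assuming an iproute2 build that forwards unknown link
 * kinds verbatim): with this rtnl_link_ops registered, an interface can
 * be created from user space with
 *
 *	ip link add dev cfhsi0 type cfhsi
 *
 * Creating it without IFLA_CAIF_HSI_* attributes keeps hsi_default_config.
 */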
1446
1447static void __exit cfhsi_exit_module(void)
1448{
1449	struct list_head *list_node;
1450	struct list_head *n;
1451	struct cfhsi *cfhsi;
1452
1453	rtnl_link_unregister(&caif_hsi_link_ops);
 
1454
1455	rtnl_lock();
1456	list_for_each_safe(list_node, n, &cfhsi_list) {
1457		cfhsi = list_entry(list_node, struct cfhsi, list);
1458		unregister_netdev(cfhsi->ndev);
1459	}
1460	rtnl_unlock();
1461}
1462
1463static int __init cfhsi_init_module(void)
1464{
1465	return rtnl_link_register(&caif_hsi_link_ops);
 
1466}
1467
1468module_init(cfhsi_init_module);
1469module_exit(cfhsi_exit_module);
v3.1
   1/*
   2 * Copyright (C) ST-Ericsson AB 2010
   3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
   4 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
   5 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
   6 * License terms: GNU General Public License (GPL) version 2.
   7 */
   8
 
 
   9#include <linux/init.h>
  10#include <linux/module.h>
  11#include <linux/device.h>
  12#include <linux/platform_device.h>
  13#include <linux/netdevice.h>
  14#include <linux/string.h>
  15#include <linux/list.h>
  16#include <linux/interrupt.h>
  17#include <linux/delay.h>
  18#include <linux/sched.h>
  19#include <linux/if_arp.h>
  20#include <linux/timer.h>
 
 
  21#include <net/caif/caif_layer.h>
  22#include <net/caif/caif_hsi.h>
  23
  24MODULE_LICENSE("GPL");
  25MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
  26MODULE_DESCRIPTION("CAIF HSI driver");
  27
  28/* Returns the number of padding bytes for alignment. */
  29#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
  30				(((pow)-((x)&((pow)-1)))))
  31
  32/*
  33 * HSI padding options.
  34 * Warning: must be a base of 2 (& operation used) and can not be zero !
  35 */
  36static int hsi_head_align = 4;
  37module_param(hsi_head_align, int, S_IRUGO);
  38MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
  39
  40static int hsi_tail_align = 4;
  41module_param(hsi_tail_align, int, S_IRUGO);
  42MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
  43
  44/*
  45 * HSI link layer flowcontrol thresholds.
  46 * Warning: A high threshold value migth increase throughput but it will at
  47 * the same time prevent channel prioritization and increase the risk of
  48 * flooding the modem. The high threshold should be above the low.
  49 */
  50static int hsi_high_threshold = 100;
  51module_param(hsi_high_threshold, int, S_IRUGO);
  52MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
  53
  54static int hsi_low_threshold = 50;
  55module_param(hsi_low_threshold, int, S_IRUGO);
  56MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON).");
 
 
 
 
  57
  58#define ON 1
  59#define OFF 0
  60
  61/*
  62 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
  63 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
  64 * de-asserted before the number of packets drops below LOW_WATER_MARK.
  65 */
  66#define LOW_WATER_MARK   hsi_low_threshold
  67#define HIGH_WATER_MARK  hsi_high_threshold
  68
  69static LIST_HEAD(cfhsi_list);
  70static spinlock_t cfhsi_list_lock;
  71
  72static void cfhsi_inactivity_tout(unsigned long arg)
  73{
  74	struct cfhsi *cfhsi = (struct cfhsi *)arg;
  75
  76	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
  77		__func__);
  78
  79	/* Schedule power down work queue. */
  80	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
  81		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
  82}
  83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  84static void cfhsi_abort_tx(struct cfhsi *cfhsi)
  85{
  86	struct sk_buff *skb;
  87
  88	for (;;) {
  89		spin_lock_bh(&cfhsi->lock);
  90		skb = skb_dequeue(&cfhsi->qhead);
  91		if (!skb)
  92			break;
  93
  94		cfhsi->ndev->stats.tx_errors++;
  95		cfhsi->ndev->stats.tx_dropped++;
 
  96		spin_unlock_bh(&cfhsi->lock);
  97		kfree_skb(skb);
  98	}
  99	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 100	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 101		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
 
 102	spin_unlock_bh(&cfhsi->lock);
 103}
 104
 105static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 106{
 107	char buffer[32]; /* Any reasonable value */
 108	size_t fifo_occupancy;
 109	int ret;
 110
 111	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 112		__func__);
 113
 114
 115	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
 116	if (ret) {
 117		dev_warn(&cfhsi->ndev->dev,
 118			"%s: can't wake up HSI interface: %d.\n",
 119			__func__, ret);
 120		return ret;
 121	}
 122
 123	do {
 124		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 125				&fifo_occupancy);
 126		if (ret) {
 127			dev_warn(&cfhsi->ndev->dev,
 128				"%s: can't get FIFO occupancy: %d.\n",
 129				__func__, ret);
 130			break;
 131		} else if (!fifo_occupancy)
 132			/* No more data, exitting normally */
 133			break;
 134
 135		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
 136		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 137		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
 138				cfhsi->dev);
 139		if (ret) {
 140			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
 141			dev_warn(&cfhsi->ndev->dev,
 142				"%s: can't read data: %d.\n",
 143				__func__, ret);
 144			break;
 145		}
 146
 147		ret = 5 * HZ;
 148		wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
 149			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
 150
 151		if (ret < 0) {
 152			dev_warn(&cfhsi->ndev->dev,
 153				"%s: can't wait for flush complete: %d.\n",
 154				__func__, ret);
 155			break;
 156		} else if (!ret) {
 157			ret = -ETIMEDOUT;
 158			dev_warn(&cfhsi->ndev->dev,
 159				"%s: timeout waiting for flush complete.\n",
 160				__func__);
 161			break;
 162		}
 163	} while (1);
 164
 165	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 166
 167	return ret;
 168}
 169
 170static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 171{
 172	int nfrms = 0;
 173	int pld_len = 0;
 174	struct sk_buff *skb;
 175	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 176
 177	skb = skb_dequeue(&cfhsi->qhead);
 178	if (!skb)
 179		return 0;
 180
 
 
 
 181	/* Check if we can embed a CAIF frame. */
 182	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
 183		struct caif_payload_info *info;
 184		int hpad = 0;
 185		int tpad = 0;
 186
 187		/* Calculate needed head alignment and tail alignment. */
 188		info = (struct caif_payload_info *)&skb->cb;
 189
 190		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
 191		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
 192
 193		/* Check if frame still fits with added alignment. */
 194		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
 195			u8 *pemb = desc->emb_frm;
 196			desc->offset = CFHSI_DESC_SHORT_SZ;
 197			*pemb = (u8)(hpad - 1);
 198			pemb += hpad;
 199
 200			/* Update network statistics. */
 
 201			cfhsi->ndev->stats.tx_packets++;
 202			cfhsi->ndev->stats.tx_bytes += skb->len;
 
 
 203
 204			/* Copy in embedded CAIF frame. */
 205			skb_copy_bits(skb, 0, pemb, skb->len);
 
 
 206			consume_skb(skb);
 207			skb = NULL;
 208		}
 209	} else
 210		/* Clear offset. */
 211		desc->offset = 0;
 212
 213	/* Create payload CAIF frames. */
 214	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 215	while (nfrms < CFHSI_MAX_PKTS) {
 216		struct caif_payload_info *info;
 217		int hpad = 0;
 218		int tpad = 0;
 219
 220		if (!skb)
 221			skb = skb_dequeue(&cfhsi->qhead);
 222
 223		if (!skb)
 224			break;
 225
 226		/* Calculate needed head alignment and tail alignment. */
 227		info = (struct caif_payload_info *)&skb->cb;
 228
 229		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
 230		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
 231
 232		/* Fill in CAIF frame length in descriptor. */
 233		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
 234
 235		/* Fill head padding information. */
 236		*pfrm = (u8)(hpad - 1);
 237		pfrm += hpad;
 238
 239		/* Update network statistics. */
 
 240		cfhsi->ndev->stats.tx_packets++;
 241		cfhsi->ndev->stats.tx_bytes += skb->len;
 
 
 242
 243		/* Copy in CAIF frame. */
 244		skb_copy_bits(skb, 0, pfrm, skb->len);
 245
 246		/* Update payload length. */
 247		pld_len += desc->cffrm_len[nfrms];
 248
 249		/* Update frame pointer. */
 250		pfrm += skb->len + tpad;
 
 
 251		consume_skb(skb);
 252		skb = NULL;
 253
 254		/* Update number of frames. */
 255		nfrms++;
 256	}
 257
 258	/* Unused length fields should be zero-filled (according to SPEC). */
 259	while (nfrms < CFHSI_MAX_PKTS) {
 260		desc->cffrm_len[nfrms] = 0x0000;
 261		nfrms++;
 262	}
 263
 264	/* Check if we can piggy-back another descriptor. */
 265	skb = skb_peek(&cfhsi->qhead);
 266	if (skb)
 267		desc->header |= CFHSI_PIGGY_DESC;
 268	else
 269		desc->header &= ~CFHSI_PIGGY_DESC;
 270
 271	return CFHSI_DESC_SZ + pld_len;
 272}
 273
 274static void cfhsi_tx_done_work(struct work_struct *work)
 275{
 276	struct cfhsi *cfhsi = NULL;
 277	struct cfhsi_desc *desc = NULL;
 278	int len = 0;
 279	int res;
 280
 281	cfhsi = container_of(work, struct cfhsi, tx_done_work);
 282	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 283		__func__);
 284
 285	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 286		return;
 287
 288	desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 289
 290	do {
 291		/*
 292		 * Send flow on if flow off has been previously signalled
 293		 * and number of packets is below low water mark.
 294		 */
 295		spin_lock_bh(&cfhsi->lock);
 296		if (cfhsi->flow_off_sent &&
 297				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
 298				cfhsi->cfdev.flowctrl) {
 299
 300			cfhsi->flow_off_sent = 0;
 301			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
 302		}
 303		spin_unlock_bh(&cfhsi->lock);
 304
 305		/* Create HSI frame. */
 306		len = cfhsi_tx_frm(desc, cfhsi);
 307		if (!len) {
 
 
 
 
 
 
 308			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 309			/* Start inactivity timer. */
 310			mod_timer(&cfhsi->timer,
 311					jiffies + CFHSI_INACTIVITY_TOUT);
 
 312			break;
 313		}
 314
 315		/* Set up new transfer. */
 316		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 317		if (WARN_ON(res < 0)) {
 318			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 319				__func__, res);
 320		}
 321	} while (res < 0);
 322}
 323
 324static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 325{
 326	struct cfhsi *cfhsi;
 327
 328	cfhsi = container_of(drv, struct cfhsi, drv);
 329	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 330		__func__);
 331
 332	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 333		return;
 334
 335	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
 336}
 337
 338static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 339{
 340	int xfer_sz = 0;
 341	int nfrms = 0;
 342	u16 *plen = NULL;
 343	u8 *pfrm = NULL;
 344
 345	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 346			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 347		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
 348			__func__);
 349		return 0;
 350	}
 351
 352	/* Check for embedded CAIF frame. */
 353	if (desc->offset) {
 354		struct sk_buff *skb;
 355		u8 *dst = NULL;
 356		int len = 0, retries = 0;
 357		pfrm = ((u8 *)desc) + desc->offset;
 358
 359		/* Remove offset padding. */
 360		pfrm += *pfrm + 1;
 361
 362		/* Read length of CAIF frame (little endian). */
 363		len = *pfrm;
 364		len |= ((*(pfrm+1)) << 8) & 0xFF00;
 365		len += 2;	/* Add FCS fields. */
 366
 
 
 
 
 
 
 367
 368		/* Allocate SKB (OK even in IRQ context). */
 369		skb = alloc_skb(len + 1, GFP_KERNEL);
 370		while (!skb) {
 371			retries++;
 372			schedule_timeout(1);
 373			skb = alloc_skb(len + 1, GFP_KERNEL);
 374			if (skb) {
 375				printk(KERN_WARNING "%s: slept for %u "
 376						"before getting memory\n",
 377						__func__, retries);
 378				break;
 379			}
 380			if (retries > HZ) {
 381				printk(KERN_ERR "%s: slept for 1HZ and "
 382						"did not get memory\n",
 383						__func__);
 384				cfhsi->ndev->stats.rx_dropped++;
 385				goto drop_frame;
 386			}
 387		}
 388		caif_assert(skb != NULL);
 389
 390		dst = skb_put(skb, len);
 391		memcpy(dst, pfrm, len);
 392
 393		skb->protocol = htons(ETH_P_CAIF);
 394		skb_reset_mac_header(skb);
 395		skb->dev = cfhsi->ndev;
 396
 397		/*
 398		 * We are called from a arch specific platform device.
 399		 * Unfortunately we don't know what context we're
 400		 * running in.
 401		 */
 402		if (in_interrupt())
 403			netif_rx(skb);
 404		else
 405			netif_rx_ni(skb);
 406
 407		/* Update network statistics. */
 408		cfhsi->ndev->stats.rx_packets++;
 409		cfhsi->ndev->stats.rx_bytes += len;
 410	}
 411
 412drop_frame:
 413	/* Calculate transfer length. */
 414	plen = desc->cffrm_len;
 415	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 416		xfer_sz += *plen;
 417		plen++;
 418		nfrms++;
 419	}
 420
 421	/* Check for piggy-backed descriptor. */
 422	if (desc->header & CFHSI_PIGGY_DESC)
 423		xfer_sz += CFHSI_DESC_SZ;
 424
 425	if (xfer_sz % 4) {
 426		dev_err(&cfhsi->ndev->dev,
 427				"%s: Invalid payload len: %d, ignored.\n",
 428			__func__, xfer_sz);
 429		xfer_sz = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 430	}
 431
 
 
 
 
 432	return xfer_sz;
 433}
 434
 435static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 436{
 437	int rx_sz = 0;
 438	int nfrms = 0;
 439	u16 *plen = NULL;
 440	u8 *pfrm = NULL;
 441
 442	/* Sanity check header and offset. */
 443	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
 444			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
 445		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
 446			__func__);
 447		return -EINVAL;
 448	}
 449
 450	/* Set frame pointer to start of payload. */
 451	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 452	plen = desc->cffrm_len;
 
 
 
 
 
 
 
 
 
 
 453	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 454		struct sk_buff *skb;
 455		u8 *dst = NULL;
 456		u8 *pcffrm = NULL;
 457		int len = 0, retries = 0;
 458
 459		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
 460			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
 461				__func__);
 462			return -EINVAL;
 463		}
 464
 465		/* CAIF frame starts after head padding. */
 466		pcffrm = pfrm + *pfrm + 1;
 467
 468		/* Read length of CAIF frame (little endian). */
 469		len = *pcffrm;
 470		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
 471		len += 2;	/* Add FCS fields. */
 472
 
 
 
 
 
 
 
 473		/* Allocate SKB (OK even in IRQ context). */
 474		skb = alloc_skb(len + 1, GFP_KERNEL);
 475		while (!skb) {
 476			retries++;
 477			schedule_timeout(1);
 478			skb = alloc_skb(len + 1, GFP_KERNEL);
 479			if (skb) {
 480				printk(KERN_WARNING "%s: slept for %u "
 481						"before getting memory\n",
 482						__func__, retries);
 483				break;
 484			}
 485			if (retries > HZ) {
 486				printk(KERN_ERR "%s: slept for 1HZ "
 487						"and did not get memory\n",
 488						__func__);
 489				cfhsi->ndev->stats.rx_dropped++;
 490				goto drop_frame;
 491			}
 492		}
 493		caif_assert(skb != NULL);
 494
 495		dst = skb_put(skb, len);
 496		memcpy(dst, pcffrm, len);
 497
 498		skb->protocol = htons(ETH_P_CAIF);
 499		skb_reset_mac_header(skb);
 500		skb->dev = cfhsi->ndev;
 501
 502		/*
 503		 * We're called from a platform device,
 504		 * and don't know the context we're running in.
 505		 */
 506		if (in_interrupt())
 507			netif_rx(skb);
 508		else
 509			netif_rx_ni(skb);
 510
 511		/* Update network statistics. */
 512		cfhsi->ndev->stats.rx_packets++;
 513		cfhsi->ndev->stats.rx_bytes += len;
 514
 515drop_frame:
 516		pfrm += *plen;
 517		rx_sz += *plen;
 518		plen++;
 519		nfrms++;
 520	}
 521
 522	return rx_sz;
 523}
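/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how the
 * length of an embedded CAIF frame is recovered in cfhsi_rx_pld() above.
 * The first byte of each slot holds the head-padding size, the frame starts
 * after that padding, and its length is stored little endian, excluding the
 * two FCS bytes.
 */
static inline int cfhsi_sketch_frm_len(const u8 *pfrm)
{
	/* Skip the head padding announced by the first byte. */
	const u8 *pcffrm = pfrm + *pfrm + 1;

	/* Read the little-endian length and add the FCS fields. */
	return (pcffrm[0] | (pcffrm[1] << 8)) + 2;
}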
 524
 525static void cfhsi_rx_done_work(struct work_struct *work)
 526{
 527	int res;
 528	int desc_pld_len = 0;
 529	struct cfhsi *cfhsi = NULL;
 530	struct cfhsi_desc *desc = NULL;
 531
 532	cfhsi = container_of(work, struct cfhsi, rx_done_work);
 533	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
 534
 535	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
 536		__func__);
 537
 538	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 539		return;
 540
 541	/* Update inactivity timer if pending. */
 542	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
 543
 544	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
 545		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
 546	} else {
 547		int pld_len;
 548
 549		pld_len = cfhsi_rx_pld(desc, cfhsi);
 550
 551		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
 552			struct cfhsi_desc *piggy_desc;
 553			piggy_desc = (struct cfhsi_desc *)
 554				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
 555						pld_len);
 556
 557			/* Extract piggy-backed descriptor. */
 558			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
 559
 560			/*
 561			 * Copy needed information from the piggy-backed
 562			 * descriptor to the descriptor at the start.
 563			 */
 564			memcpy((u8 *)desc, (u8 *)piggy_desc,
 565					CFHSI_DESC_SHORT_SZ);
 566		}
 567	}
 568
 569	if (desc_pld_len) {
 570		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
 571		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
 572		cfhsi->rx_len = desc_pld_len;
 573	} else {
 574		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
 575		cfhsi->rx_ptr = cfhsi->rx_buf;
 576		cfhsi->rx_len = CFHSI_DESC_SZ;
 577	}
 578	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
 579
 580	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
 581		/* Set up new transfer. */
 582		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
 583			__func__);
 584		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
 585				cfhsi->dev);
 586		if (WARN_ON(res < 0)) {
 587			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
 588				__func__, res);
 589			cfhsi->ndev->stats.rx_errors++;
 590			cfhsi->ndev->stats.rx_dropped++;
 591		}
 592	}
 593}
 594
 595static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
 596{
 597	struct cfhsi *cfhsi;
 598
 599	cfhsi = container_of(drv, struct cfhsi, drv);
 600	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 601		__func__);
 602
 603	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 604		return;
 605
 606	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
 607
 608	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
 609		wake_up_interruptible(&cfhsi->flush_fifo_wait);
 610	else
 611		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
 612}
 613
 614static void cfhsi_wake_up(struct work_struct *work)
 615{
 616	struct cfhsi *cfhsi = NULL;
 617	int res;
 618	int len;
 619	long ret;
 620
 621	cfhsi = container_of(work, struct cfhsi, wake_up_work);
 622
 623	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 624		return;
 625
 626	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
 627		/* This happens when wakeup is requested by
 628		 * both ends at the same time. */
 629		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 630		return;
 631	}
 632
 633	/* Activate wake line. */
 634	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
 635
 636	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
 637		__func__);
 638
 639	/* Wait for acknowledgement. */
 640	ret = CFHSI_WAKEUP_TOUT;
 641	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
 642					test_bit(CFHSI_WAKE_UP_ACK,
 643							&cfhsi->bits), ret);
 644	if (unlikely(ret < 0)) {
 645		/* Interrupted by signal. */
 646		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
 647			__func__, ret);
 648		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 649		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 650		return;
 651	} else if (!ret) {
 652		/* Wakeup timeout */
 653		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
 654			__func__);
 655		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 656		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 657		return;
 658	}
 659	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
 660		__func__);
 661
 662	/* Mark the link awake and clear the wake-up request. */
 663	set_bit(CFHSI_AWAKE, &cfhsi->bits);
 664	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 665
 666	/* Resume read operation. */
 667	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
 668		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
 669			__func__);
 670		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
 671				cfhsi->rx_len, cfhsi->dev);
 672		if (WARN_ON(res < 0)) {
 673			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
 674				__func__, res);
 675		}
 676	}
 677
 678	/* Clear power up acknowledgement. */
 679	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 680
 681	spin_lock_bh(&cfhsi->lock);
 682
 683	/* Resume transmit if the queue is not empty; otherwise just restart the timer. */
 684	if (!skb_peek(&cfhsi->qhead)) {
 685		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
 686			__func__);
 687		/* Start inactivity timer. */
 688		mod_timer(&cfhsi->timer,
 689				jiffies + CFHSI_INACTIVITY_TOUT);
 690		spin_unlock_bh(&cfhsi->lock);
 691		return;
 692	}
 693
 694	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
 695		__func__);
 696
 697	spin_unlock_bh(&cfhsi->lock);
 698
 699	/* Create HSI frame. */
 700	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
 701
 702	if (likely(len > 0)) {
 703		/* Set up new transfer. */
 704		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 705		if (WARN_ON(res < 0)) {
 706			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 707				__func__, res);
 708			cfhsi_abort_tx(cfhsi);
 709		}
 710	} else {
 711		dev_err(&cfhsi->ndev->dev,
 712				"%s: Failed to create HSI frame: %d.\n",
 713				__func__, len);
 714	}
 715
 716}
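/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * return-value convention of wait_event_interruptible_timeout() relied on by
 * cfhsi_wake_up() above and cfhsi_wake_down() below - negative when
 * interrupted by a signal, zero on timeout, and the remaining jiffies (> 0)
 * when the awaited bit was set in time.
 */
static inline int cfhsi_sketch_wait_for_ack(struct cfhsi *cfhsi)
{
	long ret;

	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
				test_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits),
				CFHSI_WAKEUP_TOUT);
	if (ret < 0)
		return -ERESTARTSYS;	/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* the peer never acknowledged */
	return 0;			/* acknowledged with time to spare */
}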
 717
 718static void cfhsi_wake_down(struct work_struct *work)
 719{
 720	long ret;
 721	struct cfhsi *cfhsi = NULL;
 722	size_t fifo_occupancy;
 723
 724	cfhsi = container_of(work, struct cfhsi, wake_down_work);
 725	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 726		__func__);
 727
 728	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 729		return;
 730
 731	/* Check if there is something in FIFO. */
 732	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 733							&fifo_occupancy)))
 734		fifo_occupancy = 0;
 735
 736	if (fifo_occupancy) {
 737		dev_dbg(&cfhsi->ndev->dev,
 738				"%s: %u words in RX FIFO, restart timer.\n",
 739				__func__, (unsigned) fifo_occupancy);
 740		spin_lock_bh(&cfhsi->lock);
 741		mod_timer(&cfhsi->timer,
 742				jiffies + CFHSI_INACTIVITY_TOUT);
 743		spin_unlock_bh(&cfhsi->lock);
 744		return;
 745	}
 746
 747	/* Cancel pending RX requests */
 748	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
 749
 750	/* Deactivate wake line. */
 751	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 752
 753	/* Wait for acknowledgement. */
 754	ret = CFHSI_WAKEUP_TOUT;
 755	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
 756					test_bit(CFHSI_WAKE_DOWN_ACK,
 757							&cfhsi->bits),
 758					ret);
 759	if (ret < 0) {
 760		/* Interrupted by signal. */
 761		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
 762			__func__, ret);
 763		return;
 764	} else if (!ret) {
 765		/* Timeout */
 766		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
 767			__func__);
 768	}
 769
 770	/* Clear power down acknowledgement. */
 771	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 772	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 773
 774	/* Check if there is something in FIFO. */
 775	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 776							&fifo_occupancy)))
 777		fifo_occupancy = 0;
 778
 779	if (fifo_occupancy) {
 780		dev_dbg(&cfhsi->ndev->dev,
 781				"%s: %u words in RX FIFO, wakeup forced.\n",
 782				__func__, (unsigned) fifo_occupancy);
 783		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 784			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 785	} else
 786		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
 787			__func__);
 788}
 789
 790static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
 791{
 792	struct cfhsi *cfhsi = NULL;
 793
 794	cfhsi = container_of(drv, struct cfhsi, drv);
 795	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 796		__func__);
 797
 798	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 799	wake_up_interruptible(&cfhsi->wake_up_wait);
 800
 801	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 802		return;
 803
 804	/* Schedule wake up work queue if the peer initiates. */
 805	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 806		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 807}
 808
 809static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
 810{
 811	struct cfhsi *cfhsi = NULL;
 812
 813	cfhsi = container_of(drv, struct cfhsi, drv);
 814	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 815		__func__);
 816
 817	/* Initiating low power is only permitted by the host (us). */
 818	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 819	wake_up_interruptible(&cfhsi->wake_down_wait);
 820}
 821
 822static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 823{
 824	struct cfhsi *cfhsi = NULL;
 825	int start_xfer = 0;
 826	int timer_active;
 827
 828	if (!dev)
 829		return -EINVAL;
 830
 831	cfhsi = netdev_priv(dev);
 832
 833	spin_lock_bh(&cfhsi->lock);
 834
 835	skb_queue_tail(&cfhsi->qhead, skb);
 836
 837	/* Sanity check; xmit should not be called after unregister_netdev */
 838	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
 839		spin_unlock_bh(&cfhsi->lock);
 840		cfhsi_abort_tx(cfhsi);
 841		return -EINVAL;
 842	}
 843
 844	/* Send flow off if number of packets is above high water mark. */
 845	if (!cfhsi->flow_off_sent &&
 846		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
 847		cfhsi->cfdev.flowctrl) {
 848		cfhsi->flow_off_sent = 1;
 849		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
 850	}
 851
 852	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
 853		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
 854		start_xfer = 1;
 855	}
 856
 857	spin_unlock_bh(&cfhsi->lock);
 858
 859	if (!start_xfer)
 860		return 0;
 861
 862	/* Delete inactivity timer if started. */
 863#ifdef CONFIG_SMP
 864	timer_active = del_timer_sync(&cfhsi->timer);
 865#else
 866	timer_active = del_timer(&cfhsi->timer);
 867#endif /* CONFIG_SMP */
 868
 869	if (timer_active) {
 870		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 871		int len;
 872		int res;
 873
 874		/* Create HSI frame. */
 875		len = cfhsi_tx_frm(desc, cfhsi);
 876		BUG_ON(!len);
 877
 878		/* Set up new transfer. */
 879		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 880		if (WARN_ON(res < 0)) {
 881			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 882				__func__, res);
 883			cfhsi_abort_tx(cfhsi);
 884		}
 885	} else {
 886		/* Schedule wake up work queue if we initiate the transfer. */
 887		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
 888			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
 889	}
 890
 891	return 0;
 892}
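/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * watermark hysteresis behind the flow-off decision in cfhsi_xmit() above.
 * Flow is switched OFF once the queue grows past the high mark and is only
 * switched back ON (in the TX-completion path, not shown in this excerpt)
 * after the queue has drained below the low mark, so the signal does not
 * toggle on every packet.  The caller is assumed to hold cfhsi->lock.
 */
static inline void cfhsi_sketch_flowctrl(struct cfhsi *cfhsi)
{
	if (!cfhsi->cfdev.flowctrl)
		return;

	if (!cfhsi->flow_off_sent &&
	    cfhsi->qhead.qlen > cfhsi->q_high_mark) {
		/* Too many queued packets: ask CAIF to stop sending. */
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	} else if (cfhsi->flow_off_sent &&
		   cfhsi->qhead.qlen <= cfhsi->q_low_mark) {
		/* Queue has drained: allow CAIF to send again. */
		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}
}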
 893
 894static int cfhsi_open(struct net_device *dev)
 895{
 896	netif_wake_queue(dev);
 897
 898	return 0;
 899}
 900
 901static int cfhsi_close(struct net_device *dev)
 902{
 903	netif_stop_queue(dev);
 904
 905	return 0;
 906}
 907
 908static const struct net_device_ops cfhsi_ops = {
 909	.ndo_open = cfhsi_open,
 910	.ndo_stop = cfhsi_close,
 911	.ndo_start_xmit = cfhsi_xmit
 912};
 913
 914static void cfhsi_setup(struct net_device *dev)
 915{
 916	struct cfhsi *cfhsi = netdev_priv(dev);
 917	dev->features = 0;
 918	dev->netdev_ops = &cfhsi_ops;
 919	dev->type = ARPHRD_CAIF;
 920	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 921	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
 922	dev->tx_queue_len = 0;
 923	dev->destructor = free_netdev;
 924	skb_queue_head_init(&cfhsi->qhead);
 925	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
 926	cfhsi->cfdev.use_frag = false;
 927	cfhsi->cfdev.use_stx = false;
 928	cfhsi->cfdev.use_fcs = false;
 929	cfhsi->ndev = dev;
 930}
 931
 932int cfhsi_probe(struct platform_device *pdev)
 933{
 934	struct cfhsi *cfhsi = NULL;
 935	struct net_device *ndev;
 936	struct cfhsi_dev *dev;
 937	int res;
 938
 939	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
 940	if (!ndev) {
 941		dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
 942			__func__);
 943		return -ENODEV;
 944	}
 945
 946	cfhsi = netdev_priv(ndev);
 947	cfhsi->ndev = ndev;
 948	cfhsi->pdev = pdev;
 949
 950	/* Initialize state variables. */
 951	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 952	cfhsi->rx_state = CFHSI_RX_STATE_DESC;
 953
 954	/* Set flow info */
 955	cfhsi->flow_off_sent = 0;
 956	cfhsi->q_low_mark = LOW_WATER_MARK;
 957	cfhsi->q_high_mark = HIGH_WATER_MARK;
 958
 959	/* Assign the HSI device. */
 960	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
 961	cfhsi->dev = dev;
 962
 963	/* Assign the driver to this HSI device. */
 964	dev->drv = &cfhsi->drv;
 965
 966	/*
 967	 * Allocate a TX buffer with the size of an HSI packet descriptor
 968	 * and the necessary room for CAIF payload frames.
 969	 */
 970	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
 971	if (!cfhsi->tx_buf) {
 972		dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
 973			__func__);
 974		res = -ENODEV;
 975		goto err_alloc_tx;
 976	}
 977
 978	/*
 979	 * Allocate an RX buffer with the size of two HSI packet descriptors and
 980	 * the necessary room for CAIF payload frames.
 981	 */
 982	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
 983	if (!cfhsi->rx_buf) {
 984		dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
 985			__func__);
 986		res = -ENODEV;
 987		goto err_alloc_rx;
 988	}
 989
 990	/* Initialize receive variables. */
 991	cfhsi->rx_ptr = cfhsi->rx_buf;
 992	cfhsi->rx_len = CFHSI_DESC_SZ;
 993
 994	/* Initialize spin locks. */
 995	spin_lock_init(&cfhsi->lock);
 996
 997	/* Set up the driver. */
 998	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
 999	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
1000
1001	/* Initialize the work queues. */
1002	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1003	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
1004	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
1005	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
1006
1007	/* Clear all bit fields. */
1008	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1009	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1010	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1011	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1012	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
1013
1014	/* Create work thread. */
1015	cfhsi->wq = create_singlethread_workqueue(pdev->name);
1016	if (!cfhsi->wq) {
1017		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
1018			__func__);
1019		res = -ENODEV;
1020		goto err_create_wq;
1021	}
1022
1023	/* Initialize wait queues. */
1024	init_waitqueue_head(&cfhsi->wake_up_wait);
1025	init_waitqueue_head(&cfhsi->wake_down_wait);
1026	init_waitqueue_head(&cfhsi->flush_fifo_wait);
1027
1028	/* Setup the inactivity timer. */
1029	init_timer(&cfhsi->timer);
1030	cfhsi->timer.data = (unsigned long)cfhsi;
1031	cfhsi->timer.function = cfhsi_inactivity_tout;
1032
1033	/* Add CAIF HSI device to list. */
1034	spin_lock(&cfhsi_list_lock);
1035	list_add_tail(&cfhsi->list, &cfhsi_list);
1036	spin_unlock(&cfhsi_list_lock);
1037
1038	/* Activate HSI interface. */
1039	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
1040	if (res) {
1041		dev_err(&cfhsi->ndev->dev,
1042			"%s: can't activate HSI interface: %d.\n",
1043			__func__, res);
1044		goto err_activate;
1045	}
1046
1047	/* Flush FIFO */
1048	res = cfhsi_flush_fifo(cfhsi);
1049	if (res) {
1050		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
1051			__func__, res);
1052		goto err_net_reg;
1053	}
1054
1055	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
1056	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
1057
1058	/* Register network device. */
1059	res = register_netdev(ndev);
1060	if (res) {
1061		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1062			__func__, res);
1063		goto err_net_reg;
1064	}
1065
1066	netif_stop_queue(ndev);
1067
1068	return res;
1069
1070 err_net_reg:
1071	cfhsi->dev->cfhsi_down(cfhsi->dev);
1072 err_activate:
1073	destroy_workqueue(cfhsi->wq);
1074 err_create_wq:
1075	kfree(cfhsi->rx_buf);
1076 err_alloc_rx:
1077	kfree(cfhsi->tx_buf);
1078 err_alloc_tx:
1079	free_netdev(ndev);
1080
1081	return res;
1082}
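/*
 * Illustrative sketch (hypothetical board code, not part of this file): how a
 * platform would hand an HSI interface to this driver.  cfhsi_probe() above
 * expects a struct cfhsi_dev (with its transfer callbacks filled in by the
 * HSI glue layer) as platform data on a platform device named "cfhsi".
 */
static struct cfhsi_dev sketch_hsi_dev;		/* callbacks set up elsewhere */

static struct platform_device sketch_cfhsi_device = {
	.name = "cfhsi",
	.id = 0,
	.dev = {
		.platform_data = &sketch_hsi_dev,
	},
};

/* Board init code would then call platform_device_register(&sketch_cfhsi_device). */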
1083
1084static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
1085{
1086	u8 *tx_buf, *rx_buf;
1087
1088	/* Stop TXing */
1089	netif_tx_stop_all_queues(cfhsi->ndev);
1090
1091	/* Going to shut down the driver. */
1092	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1093
1094	if (remove_platform_dev) {
1095		/* Flush workqueue */
1096		flush_workqueue(cfhsi->wq);
1097
1098		/* Notify device. */
1099		platform_device_unregister(cfhsi->pdev);
1100	}
1101
1102	/* Flush workqueue */
1103	flush_workqueue(cfhsi->wq);
1104
1105	/* Delete timer if pending */
1106#ifdef CONFIG_SMP
1107	del_timer_sync(&cfhsi->timer);
1108#else
1109	del_timer(&cfhsi->timer);
1110#endif /* CONFIG_SMP */
1111
1112	/* Cancel pending RX request (if any) */
1113	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
1114
1115	/* Flush again and destroy workqueue */
1116	destroy_workqueue(cfhsi->wq);
1117
1118	/* Store buffers: will be freed later. */
1119	tx_buf = cfhsi->tx_buf;
1120	rx_buf = cfhsi->rx_buf;
1121
1122	/* Flush transmit queues. */
1123	cfhsi_abort_tx(cfhsi);
1124
1125	/* Deactivate interface */
1126	cfhsi->dev->cfhsi_down(cfhsi->dev);
1127
1128	/* Finally unregister the network device. */
1129	unregister_netdev(cfhsi->ndev);
1130
1131	/* Free buffers. */
1132	kfree(tx_buf);
1133	kfree(rx_buf);
 
 
1134}
1135
1136int cfhsi_remove(struct platform_device *pdev)
1137{
1138	struct list_head *list_node;
1139	struct list_head *n;
1140	struct cfhsi *cfhsi = NULL;
1141	struct cfhsi_dev *dev;
1142
1143	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1144	spin_lock(&cfhsi_list_lock);
1145	list_for_each_safe(list_node, n, &cfhsi_list) {
1146		cfhsi = list_entry(list_node, struct cfhsi, list);
1147		/* Find the corresponding device. */
1148		if (cfhsi->dev == dev) {
1149			/* Remove from list. */
1150			list_del(list_node);
1151			spin_unlock(&cfhsi_list_lock);
1152
1153			/* Shutdown driver. */
1154			cfhsi_shutdown(cfhsi, false);
1155
1156			return 0;
1157		}
1158	}
1159	spin_unlock(&cfhsi_list_lock);
1160	return -ENODEV;
1161}
1162
1163struct platform_driver cfhsi_plat_drv = {
1164	.probe = cfhsi_probe,
1165	.remove = cfhsi_remove,
1166	.driver = {
1167		   .name = "cfhsi",
1168		   .owner = THIS_MODULE,
1169		   },
1170};
1171
1172static void __exit cfhsi_exit_module(void)
1173{
1174	struct list_head *list_node;
1175	struct list_head *n;
1176	struct cfhsi *cfhsi = NULL;
1177
1178	spin_lock(&cfhsi_list_lock);
1179	list_for_each_safe(list_node, n, &cfhsi_list) {
1180		cfhsi = list_entry(list_node, struct cfhsi, list);
1181
1182		/* Remove from list. */
1183		list_del(list_node);
1184		spin_unlock(&cfhsi_list_lock);
1185
1186		/* Shutdown driver. */
1187		cfhsi_shutdown(cfhsi, true);
1188
1189		spin_lock(&cfhsi_list_lock);
1190	}
1191	spin_unlock(&cfhsi_list_lock);
1192
1193	/* Unregister platform driver. */
1194	platform_driver_unregister(&cfhsi_plat_drv);
1195}
1196
1197static int __init cfhsi_init_module(void)
1198{
1199	int result;
1200
1201	/* Initialize spin lock. */
1202	spin_lock_init(&cfhsi_list_lock);
1203
1204	/* Register platform driver. */
1205	result = platform_driver_register(&cfhsi_plat_drv);
1206	if (result) {
1207		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
1208			result);
1209		goto err_dev_register;
1210	}
1211
1212	return result;
1213
1214 err_dev_register:
1215	return result;
1216}
1217
1218module_init(cfhsi_init_module);
1219module_exit(cfhsi_exit_module);