v3.1 (drivers/net/caif/caif_shmcore.c)
  1/*
  2 * Copyright (C) ST-Ericsson AB 2010
  3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
  4 * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
  5 *           Daniel Martensson / daniel.martensson@stericsson.com
  6 * License terms: GNU General Public License (GPL) version 2
  7 */
  8
  9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
 10
 11#include <linux/spinlock.h>
 12#include <linux/sched.h>
 13#include <linux/list.h>
 14#include <linux/netdevice.h>
 15#include <linux/if_arp.h>
 16
 17#include <net/caif/caif_device.h>
 18#include <net/caif/caif_shm.h>
 19
 20#define NR_TX_BUF		6
 21#define NR_RX_BUF		6
 22#define TX_BUF_SZ		0x2000
 23#define RX_BUF_SZ		0x2000
 24
 25#define CAIF_NEEDED_HEADROOM	32
 26
 27#define CAIF_FLOW_ON		1
 28#define CAIF_FLOW_OFF		0
 29
 30#define LOW_WATERMARK		3
 31#define HIGH_WATERMARK		4
 32
 33/* Maximum number of CAIF buffers per shared memory buffer. */
 34#define SHM_MAX_FRMS_PER_BUF	10
 35
 36/*
 37 * Size in bytes of the descriptor area
 38 * (With end of descriptor signalling)
 39 */
 40#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
 41					sizeof(struct shm_pck_desc))
 42
 43/*
 44 * Offset to the first CAIF frame within a shared memory buffer.
 45 * Aligned on 32 bytes.
 46 */
 47#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
 48
 49/* Number of bytes for CAIF shared memory header. */
 50#define SHM_HDR_LEN		1
 51
 52/* Number of padding bytes for the complete CAIF frame. */
 53#define SHM_FRM_PAD_LEN		4
 54
 55#define CAIF_MAX_MTU		4096
 56
 57#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
 58#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)
 59
 60#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
 61#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
 62
 63#define SHM_FULL_MASK		(0x0F << 0)
 64#define SHM_EMPTY_MASK		(0x0F << 4)
 65
 66struct shm_pck_desc {
 67	/*
 68	 * Offset from start of shared memory area to start of
 69	 * shared memory CAIF frame.
 70	 */
 71	u32 frm_ofs;
 72	u32 frm_len;
 73};
 74
 75struct buf_list {
 76	unsigned char *desc_vptr;
 77	u32 phy_addr;
 78	u32 index;
 79	u32 len;
 80	u32 frames;
 81	u32 frm_ofs;
 82	struct list_head list;
 83};
 84
 85struct shm_caif_frm {
 86	/* Number of bytes of padding before the CAIF frame. */
 87	u8 hdr_ofs;
 88};
 89
 90struct shmdrv_layer {
 91	/* caif_dev_common must always be first in the structure*/
 92	struct caif_dev_common cfdev;
 93
 94	u32 shm_tx_addr;
 95	u32 shm_rx_addr;
 96	u32 shm_base_addr;
 97	u32 tx_empty_available;
 98	spinlock_t lock;
 99
100	struct list_head tx_empty_list;
101	struct list_head tx_pend_list;
102	struct list_head tx_full_list;
103	struct list_head rx_empty_list;
104	struct list_head rx_pend_list;
105	struct list_head rx_full_list;
106
107	struct workqueue_struct *pshm_tx_workqueue;
108	struct workqueue_struct *pshm_rx_workqueue;
109
110	struct work_struct shm_tx_work;
111	struct work_struct shm_rx_work;
112
113	struct sk_buff_head sk_qhead;
114	struct shmdev_layer *pshm_dev;
115};
116
117static int shm_netdev_open(struct net_device *shm_netdev)
118{
119	netif_wake_queue(shm_netdev);
120	return 0;
121}
122
123static int shm_netdev_close(struct net_device *shm_netdev)
124{
125	netif_stop_queue(shm_netdev);
126	return 0;
127}
128
129int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
130{
131	struct buf_list *pbuf;
132	struct shmdrv_layer *pshm_drv;
133	struct list_head *pos;
134	u32 avail_emptybuff = 0;
135	unsigned long flags = 0;
136
137	pshm_drv = priv;
138
139	/* Check for received buffers. */
140	if (mbx_msg & SHM_FULL_MASK) {
141		int idx;
142
143		spin_lock_irqsave(&pshm_drv->lock, flags);
144
145		/* Check whether we have any outstanding buffers. */
146		if (list_empty(&pshm_drv->rx_empty_list)) {
147
148			/* Release spin lock. */
149			spin_unlock_irqrestore(&pshm_drv->lock, flags);
150
151			/* We print even in IRQ context... */
152			pr_warn("No empty Rx buffers to fill: "
153					"mbx_msg:%x\n", mbx_msg);
154
155			/* Bail out. */
156			goto err_sync;
157		}
158
159		pbuf =
160			list_entry(pshm_drv->rx_empty_list.next,
161					struct buf_list, list);
162		idx = pbuf->index;
163
164		/* Check buffer synchronization. */
165		if (idx != SHM_GET_FULL(mbx_msg)) {
166
167			/* We print even in IRQ context... */
168			pr_warn(
169			"phyif_shm_mbx_msg_cb: RX full out of sync:"
170			" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
171				idx, mbx_msg, SHM_GET_FULL(mbx_msg));
172
173			spin_unlock_irqrestore(&pshm_drv->lock, flags);
174
175			/* Bail out. */
176			goto err_sync;
177		}
178
179		list_del_init(&pbuf->list);
180		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
181
182		spin_unlock_irqrestore(&pshm_drv->lock, flags);
183
184		/* Schedule RX work queue. */
185		if (!work_pending(&pshm_drv->shm_rx_work))
186			queue_work(pshm_drv->pshm_rx_workqueue,
187						&pshm_drv->shm_rx_work);
188	}
189
190	/* Check for emptied buffers. */
191	if (mbx_msg & SHM_EMPTY_MASK) {
192		int idx;
193
194		spin_lock_irqsave(&pshm_drv->lock, flags);
195
196		/* Check whether we have any outstanding buffers. */
197		if (list_empty(&pshm_drv->tx_full_list)) {
198
199			/* We print even in IRQ context... */
200			pr_warn("No TX to empty: msg:%x\n", mbx_msg);
201
202			spin_unlock_irqrestore(&pshm_drv->lock, flags);
203
204			/* Bail out. */
205			goto err_sync;
206		}
207
208		pbuf =
209			list_entry(pshm_drv->tx_full_list.next,
210					struct buf_list, list);
211		idx = pbuf->index;
212
213		/* Check buffer synchronization. */
214		if (idx != SHM_GET_EMPTY(mbx_msg)) {
215
216			spin_unlock_irqrestore(&pshm_drv->lock, flags);
217
218			/* We print even in IRQ context... */
219			pr_warn("TX empty "
220				"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
221
222			/* Bail out. */
223			goto err_sync;
224		}
225		list_del_init(&pbuf->list);
226
227		/* Reset buffer parameters. */
228		pbuf->frames = 0;
229		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
230
231		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
232
233		/* Check the available no. of buffers in the empty list */
234		list_for_each(pos, &pshm_drv->tx_empty_list)
235			avail_emptybuff++;
236
237		/* Check whether we have to wake up the transmitter. */
238		if ((avail_emptybuff > HIGH_WATERMARK) &&
239					(!pshm_drv->tx_empty_available)) {
240			pshm_drv->tx_empty_available = 1;
241			pshm_drv->cfdev.flowctrl
242					(pshm_drv->pshm_dev->pshm_netdev,
243								CAIF_FLOW_ON);
244
245			spin_unlock_irqrestore(&pshm_drv->lock, flags);
246
247			/* Schedule the work queue. if required */
248			if (!work_pending(&pshm_drv->shm_tx_work))
249				queue_work(pshm_drv->pshm_tx_workqueue,
250							&pshm_drv->shm_tx_work);
251		} else
252			spin_unlock_irqrestore(&pshm_drv->lock, flags);
253	}
254
255	return 0;
256
257err_sync:
258	return -EIO;
259}
260
261static void shm_rx_work_func(struct work_struct *rx_work)
262{
263	struct shmdrv_layer *pshm_drv;
264	struct buf_list *pbuf;
265	unsigned long flags = 0;
266	struct sk_buff *skb;
267	char *p;
268	int ret;
269
270	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
271
272	while (1) {
273
274		struct shm_pck_desc *pck_desc;
275
276		spin_lock_irqsave(&pshm_drv->lock, flags);
277
278		/* Check for received buffers. */
279		if (list_empty(&pshm_drv->rx_full_list)) {
280			spin_unlock_irqrestore(&pshm_drv->lock, flags);
281			break;
282		}
283
284		pbuf =
285			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
286					list);
287		list_del_init(&pbuf->list);
288
289		/* Retrieve pointer to start of the packet descriptor area. */
290		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
291
292		/*
293		 * Check whether descriptor contains a CAIF shared memory
294		 * frame.
295		 */
296		while (pck_desc->frm_ofs) {
297			unsigned int frm_buf_ofs;
298			unsigned int frm_pck_ofs;
299			unsigned int frm_pck_len;
300			/*
301			 * Check whether offset is within buffer limits
302			 * (lower).
303			 */
304			if (pck_desc->frm_ofs <
305				(pbuf->phy_addr - pshm_drv->shm_base_addr))
306				break;
307			/*
308			 * Check whether offset is within buffer limits
309			 * (higher).
310			 */
311			if (pck_desc->frm_ofs >
312				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
313					pbuf->len))
314				break;
315
316			/* Calculate offset from start of buffer. */
317			frm_buf_ofs =
318				pck_desc->frm_ofs - (pbuf->phy_addr -
319						pshm_drv->shm_base_addr);
320
321			/*
322			 * Calculate offset and length of CAIF packet while
323			 * taking care of the shared memory header.
324			 */
325			frm_pck_ofs =
326				frm_buf_ofs + SHM_HDR_LEN +
327				(*(pbuf->desc_vptr + frm_buf_ofs));
328			frm_pck_len =
329				(pck_desc->frm_len - SHM_HDR_LEN -
330				(*(pbuf->desc_vptr + frm_buf_ofs)));
331
332			/* Check whether CAIF packet is within buffer limits */
333			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
334				break;
335
336			/* Get a suitable CAIF packet and copy in data. */
337			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
338							frm_pck_len + 1);
339			BUG_ON(skb == NULL);
340
341			p = skb_put(skb, frm_pck_len);
342			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
343
344			skb->protocol = htons(ETH_P_CAIF);
345			skb_reset_mac_header(skb);
346			skb->dev = pshm_drv->pshm_dev->pshm_netdev;
347
348			/* Push received packet up the stack. */
349			ret = netif_rx_ni(skb);
350
351			if (!ret) {
352				pshm_drv->pshm_dev->pshm_netdev->stats.
353								rx_packets++;
354				pshm_drv->pshm_dev->pshm_netdev->stats.
355						rx_bytes += pck_desc->frm_len;
356			} else
357				++pshm_drv->pshm_dev->pshm_netdev->stats.
358								rx_dropped;
359			/* Move to next packet descriptor. */
360			pck_desc++;
361		}
362
363		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
364
365		spin_unlock_irqrestore(&pshm_drv->lock, flags);
366
367	}
368
369	/* Schedule the work queue. if required */
370	if (!work_pending(&pshm_drv->shm_tx_work))
371		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
372
373}
374
375static void shm_tx_work_func(struct work_struct *tx_work)
376{
377	u32 mbox_msg;
378	unsigned int frmlen, avail_emptybuff, append = 0;
379	unsigned long flags = 0;
380	struct buf_list *pbuf = NULL;
381	struct shmdrv_layer *pshm_drv;
382	struct shm_caif_frm *frm;
383	struct sk_buff *skb;
384	struct shm_pck_desc *pck_desc;
385	struct list_head *pos;
386
387	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
388
389	do {
390		/* Initialize mailbox message. */
391		mbox_msg = 0x00;
392		avail_emptybuff = 0;
393
394		spin_lock_irqsave(&pshm_drv->lock, flags);
395
396		/* Check for pending receive buffers. */
397		if (!list_empty(&pshm_drv->rx_pend_list)) {
398
399			pbuf = list_entry(pshm_drv->rx_pend_list.next,
400						struct buf_list, list);
401
402			list_del_init(&pbuf->list);
403			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
404			/*
405			 * Value index is never changed,
406			 * so read access should be safe.
407			 */
408			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
409		}
410
411		skb = skb_peek(&pshm_drv->sk_qhead);
412
413		if (skb == NULL)
414			goto send_msg;
415
416		/* Check the available no. of buffers in the empty list */
417		list_for_each(pos, &pshm_drv->tx_empty_list)
418			avail_emptybuff++;
419
420		if ((avail_emptybuff < LOW_WATERMARK) &&
421					pshm_drv->tx_empty_available) {
422			/* Update blocking condition. */
423			pshm_drv->tx_empty_available = 0;
424			pshm_drv->cfdev.flowctrl
425					(pshm_drv->pshm_dev->pshm_netdev,
426					CAIF_FLOW_OFF);
427		}
428		/*
429		 * We simply return back to the caller if we do not have space
430		 * either in Tx pending list or Tx empty list. In this case,
431		 * we hold the received skb in the skb list, waiting to
432		 * be transmitted once Tx buffers become available
433		 */
434		if (list_empty(&pshm_drv->tx_empty_list))
435			goto send_msg;
436
437		/* Get the first free Tx buffer. */
438		pbuf = list_entry(pshm_drv->tx_empty_list.next,
439						struct buf_list, list);
440		do {
441			if (append) {
442				skb = skb_peek(&pshm_drv->sk_qhead);
443				if (skb == NULL)
444					break;
445			}
446
447			frm = (struct shm_caif_frm *)
448					(pbuf->desc_vptr + pbuf->frm_ofs);
449
450			frm->hdr_ofs = 0;
451			frmlen = 0;
452			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
453
454			/* Add tail padding if needed. */
455			if (frmlen % SHM_FRM_PAD_LEN)
456				frmlen += SHM_FRM_PAD_LEN -
457						(frmlen % SHM_FRM_PAD_LEN);
458
459			/*
460			 * Verify that packet, header and additional padding
461			 * can fit within the buffer frame area.
462			 */
463			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
464				break;
465
466			if (!append) {
467				list_del_init(&pbuf->list);
468				append = 1;
469			}
470
471			skb = skb_dequeue(&pshm_drv->sk_qhead);
472			/* Copy in CAIF frame. */
473			skb_copy_bits(skb, 0, pbuf->desc_vptr +
474					pbuf->frm_ofs + SHM_HDR_LEN +
475						frm->hdr_ofs, skb->len);
476
477			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
478			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
479									frmlen;
480			dev_kfree_skb(skb);
481
482			/* Fill in the shared memory packet descriptor area. */
483			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
484			/* Forward to current frame. */
485			pck_desc += pbuf->frames;
486			pck_desc->frm_ofs = (pbuf->phy_addr -
487						pshm_drv->shm_base_addr) +
488								pbuf->frm_ofs;
489			pck_desc->frm_len = frmlen;
490			/* Terminate packet descriptor area. */
491			pck_desc++;
492			pck_desc->frm_ofs = 0;
493			/* Update buffer parameters. */
494			pbuf->frames++;
495			pbuf->frm_ofs += frmlen + (frmlen % 32);
496
497		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
498
499		/* Assign buffer as full. */
500		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
501		append = 0;
502		mbox_msg |= SHM_SET_FULL(pbuf->index);
503send_msg:
504		spin_unlock_irqrestore(&pshm_drv->lock, flags);
505
506		if (mbox_msg)
507			pshm_drv->pshm_dev->pshmdev_mbxsend
508					(pshm_drv->pshm_dev->shm_id, mbox_msg);
509	} while (mbox_msg);
510}
511
512static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
513{
514	struct shmdrv_layer *pshm_drv;
515	unsigned long flags = 0;
516
517	pshm_drv = netdev_priv(shm_netdev);
518
519	spin_lock_irqsave(&pshm_drv->lock, flags);
520
521	skb_queue_tail(&pshm_drv->sk_qhead, skb);
522
523	spin_unlock_irqrestore(&pshm_drv->lock, flags);
524
525	/* Schedule Tx work queue. for deferred processing of skbs*/
526	if (!work_pending(&pshm_drv->shm_tx_work))
527		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
528
529	return 0;
530}
531
532static const struct net_device_ops netdev_ops = {
533	.ndo_open = shm_netdev_open,
534	.ndo_stop = shm_netdev_close,
535	.ndo_start_xmit = shm_netdev_tx,
536};
537
538static void shm_netdev_setup(struct net_device *pshm_netdev)
539{
540	struct shmdrv_layer *pshm_drv;
541	pshm_netdev->netdev_ops = &netdev_ops;
542
543	pshm_netdev->mtu = CAIF_MAX_MTU;
544	pshm_netdev->type = ARPHRD_CAIF;
545	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
546	pshm_netdev->tx_queue_len = 0;
547	pshm_netdev->destructor = free_netdev;
548
549	pshm_drv = netdev_priv(pshm_netdev);
550
551	/* Initialize structures in a clean state. */
552	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
553
554	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
555}
556
557int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
558{
559	int result, j;
560	struct shmdrv_layer *pshm_drv = NULL;
561
562	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
563						"cfshm%d", shm_netdev_setup);
564	if (!pshm_dev->pshm_netdev)
565		return -ENOMEM;
566
567	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
568	pshm_drv->pshm_dev = pshm_dev;
569
570	/*
571	 * Initialization starts with the verification of the
572	 * availability of MBX driver by calling its setup function.
573	 * MBX driver must be available by this time for proper
574	 * functioning of SHM driver.
575	 */
576	if ((pshm_dev->pshmdev_mbxsetup
577				(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
578		pr_warn("Could not config. SHM Mailbox,"
579				" Bailing out.....\n");
580		free_netdev(pshm_dev->pshm_netdev);
581		return -ENODEV;
582	}
583
584	skb_queue_head_init(&pshm_drv->sk_qhead);
585
586	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
587			" INSTANCE AT pshm_drv =0x%p\n",
588			pshm_drv->pshm_dev->shm_id, pshm_drv);
589
590	if (pshm_dev->shm_total_sz <
591			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
592
593		pr_warn("ERROR, Amount of available"
594				" Phys. SHM cannot accommodate current SHM "
595				"driver configuration, Bailing out ...\n");
596		free_netdev(pshm_dev->pshm_netdev);
597		return -ENOMEM;
598	}
599
600	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
601	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
602
603	if (pshm_dev->shm_loopback)
604		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
605	else
606		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
607						(NR_TX_BUF * TX_BUF_SZ);
608
609	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
610	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
611	INIT_LIST_HEAD(&pshm_drv->tx_full_list);
612
613	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
614	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
615	INIT_LIST_HEAD(&pshm_drv->rx_full_list);
616
617	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
618	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
619
620	pshm_drv->pshm_tx_workqueue =
621				create_singlethread_workqueue("shm_tx_work");
622	pshm_drv->pshm_rx_workqueue =
623				create_singlethread_workqueue("shm_rx_work");
624
625	for (j = 0; j < NR_TX_BUF; j++) {
626		struct buf_list *tx_buf =
627				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
628
629		if (tx_buf == NULL) {
630			pr_warn("ERROR, Could not"
631					" allocate dynamic mem. for tx_buf,"
632					" Bailing out ...\n");
633			free_netdev(pshm_dev->pshm_netdev);
634			return -ENOMEM;
635		}
636		tx_buf->index = j;
637		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
638		tx_buf->len = TX_BUF_SZ;
639		tx_buf->frames = 0;
640		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
641
642		if (pshm_dev->shm_loopback)
643			tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
644		else
645			tx_buf->desc_vptr =
646					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
647
648		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
649	}
650
651	for (j = 0; j < NR_RX_BUF; j++) {
652		struct buf_list *rx_buf =
653				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
654
655		if (rx_buf == NULL) {
656			pr_warn("ERROR, Could not"
657					" allocate dynamic mem.for rx_buf,"
658					" Bailing out ...\n");
659			free_netdev(pshm_dev->pshm_netdev);
660			return -ENOMEM;
661		}
662		rx_buf->index = j;
663		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
664		rx_buf->len = RX_BUF_SZ;
665
666		if (pshm_dev->shm_loopback)
667			rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
668		else
669			rx_buf->desc_vptr =
670					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
671		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
672	}
673
674	pshm_drv->tx_empty_available = 1;
675	result = register_netdev(pshm_dev->pshm_netdev);
676	if (result)
677		pr_warn("ERROR[%d], SHM could not, "
678			"register with NW FRMWK Bailing out ...\n", result);
679
680	return result;
681}
682
683void caif_shmcore_remove(struct net_device *pshm_netdev)
684{
685	struct buf_list *pbuf;
686	struct shmdrv_layer *pshm_drv = NULL;
687
688	pshm_drv = netdev_priv(pshm_netdev);
689
690	while (!(list_empty(&pshm_drv->tx_pend_list))) {
691		pbuf =
692			list_entry(pshm_drv->tx_pend_list.next,
693					struct buf_list, list);
694
695		list_del(&pbuf->list);
696		kfree(pbuf);
697	}
698
699	while (!(list_empty(&pshm_drv->tx_full_list))) {
700		pbuf =
701			list_entry(pshm_drv->tx_full_list.next,
702					struct buf_list, list);
703		list_del(&pbuf->list);
704		kfree(pbuf);
705	}
706
707	while (!(list_empty(&pshm_drv->tx_empty_list))) {
708		pbuf =
709			list_entry(pshm_drv->tx_empty_list.next,
710					struct buf_list, list);
711		list_del(&pbuf->list);
712		kfree(pbuf);
713	}
714
715	while (!(list_empty(&pshm_drv->rx_full_list))) {
716		pbuf =
717			list_entry(pshm_drv->tx_full_list.next,
718				struct buf_list, list);
719		list_del(&pbuf->list);
720		kfree(pbuf);
721	}
722
723	while (!(list_empty(&pshm_drv->rx_pend_list))) {
724		pbuf =
725			list_entry(pshm_drv->tx_pend_list.next,
726				struct buf_list, list);
727		list_del(&pbuf->list);
728		kfree(pbuf);
729	}
730
731	while (!(list_empty(&pshm_drv->rx_empty_list))) {
732		pbuf =
733			list_entry(pshm_drv->rx_empty_list.next,
734				struct buf_list, list);
735		list_del(&pbuf->list);
736		kfree(pbuf);
737	}
738
739	/* Destroy work queues. */
740	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
741	destroy_workqueue(pshm_drv->pshm_rx_workqueue);
742
743	unregister_netdev(pshm_netdev);
744}
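
The mailbox word passed through pshmdev_mbxsend() and received in caif_shmdrv_rx_cb() above packs two one-based buffer indices into a single u32: the low nibble names a buffer the sender has just filled, the high nibble a buffer it has emptied and returned, and a zero nibble means "nothing to report". The standalone sketch below reuses the SHM_* macros from the listing to show that round trip; the main() driver and the index values 2 and 5 are purely illustrative.

#include <stdio.h>
#include <stdint.h>

/* Same nibble encoding as in caif_shmcore.c: indices are stored
 * one-based so that a zero nibble can mean "no buffer". */
#define SHM_SET_FULL(x)		(((x + 1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)	(((x + 1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

int main(void)
{
	uint32_t mbx_msg = 0;

	/* Sender side: Tx buffer 2 has been filled, Rx buffer 5 returned. */
	mbx_msg |= SHM_SET_FULL(2);
	mbx_msg |= SHM_SET_EMPTY(5);

	/* Receiver side: the same tests caif_shmdrv_rx_cb() performs. */
	if (mbx_msg & SHM_FULL_MASK)
		printf("peer filled buffer %u\n",
		       (unsigned)SHM_GET_FULL(mbx_msg));
	if (mbx_msg & SHM_EMPTY_MASK)
		printf("peer emptied buffer %u\n",
		       (unsigned)SHM_GET_EMPTY(mbx_msg));

	return 0;
}
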
v3.5.6 (drivers/net/caif/caif_shmcore.c)
  1/*
  2 * Copyright (C) ST-Ericsson AB 2010
  3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
  4 * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
  5 *           Daniel Martensson / daniel.martensson@stericsson.com
  6 * License terms: GNU General Public License (GPL) version 2
  7 */
  8
  9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
 10
 11#include <linux/spinlock.h>
 12#include <linux/sched.h>
 13#include <linux/list.h>
 14#include <linux/netdevice.h>
 15#include <linux/if_arp.h>
 16#include <linux/io.h>
 17
 18#include <net/caif/caif_device.h>
 19#include <net/caif/caif_shm.h>
 20
 21#define NR_TX_BUF		6
 22#define NR_RX_BUF		6
 23#define TX_BUF_SZ		0x2000
 24#define RX_BUF_SZ		0x2000
 25
 26#define CAIF_NEEDED_HEADROOM	32
 27
 28#define CAIF_FLOW_ON		1
 29#define CAIF_FLOW_OFF		0
 30
 31#define LOW_WATERMARK		3
 32#define HIGH_WATERMARK		4
 33
 34/* Maximum number of CAIF buffers per shared memory buffer. */
 35#define SHM_MAX_FRMS_PER_BUF	10
 36
 37/*
 38 * Size in bytes of the descriptor area
 39 * (With end of descriptor signalling)
 40 */
 41#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
 42					sizeof(struct shm_pck_desc))
 43
 44/*
 45 * Offset to the first CAIF frame within a shared memory buffer.
 46 * Aligned on 32 bytes.
 47 */
 48#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
 49
 50/* Number of bytes for CAIF shared memory header. */
 51#define SHM_HDR_LEN		1
 52
 53/* Number of padding bytes for the complete CAIF frame. */
 54#define SHM_FRM_PAD_LEN		4
 55
 56#define CAIF_MAX_MTU		4096
 57
 58#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
 59#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)
 60
 61#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
 62#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
 63
 64#define SHM_FULL_MASK		(0x0F << 0)
 65#define SHM_EMPTY_MASK		(0x0F << 4)
 66
 67struct shm_pck_desc {
 68	/*
 69	 * Offset from start of shared memory area to start of
 70	 * shared memory CAIF frame.
 71	 */
 72	u32 frm_ofs;
 73	u32 frm_len;
 74};
 75
 76struct buf_list {
 77	unsigned char *desc_vptr;
 78	u32 phy_addr;
 79	u32 index;
 80	u32 len;
 81	u32 frames;
 82	u32 frm_ofs;
 83	struct list_head list;
 84};
 85
 86struct shm_caif_frm {
 87	/* Number of bytes of padding before the CAIF frame. */
 88	u8 hdr_ofs;
 89};
 90
 91struct shmdrv_layer {
 92	/* caif_dev_common must always be first in the structure*/
 93	struct caif_dev_common cfdev;
 94
 95	u32 shm_tx_addr;
 96	u32 shm_rx_addr;
 97	u32 shm_base_addr;
 98	u32 tx_empty_available;
 99	spinlock_t lock;
100
101	struct list_head tx_empty_list;
102	struct list_head tx_pend_list;
103	struct list_head tx_full_list;
104	struct list_head rx_empty_list;
105	struct list_head rx_pend_list;
106	struct list_head rx_full_list;
107
108	struct workqueue_struct *pshm_tx_workqueue;
109	struct workqueue_struct *pshm_rx_workqueue;
110
111	struct work_struct shm_tx_work;
112	struct work_struct shm_rx_work;
113
114	struct sk_buff_head sk_qhead;
115	struct shmdev_layer *pshm_dev;
116};
117
118static int shm_netdev_open(struct net_device *shm_netdev)
119{
120	netif_wake_queue(shm_netdev);
121	return 0;
122}
123
124static int shm_netdev_close(struct net_device *shm_netdev)
125{
126	netif_stop_queue(shm_netdev);
127	return 0;
128}
129
130int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
131{
132	struct buf_list *pbuf;
133	struct shmdrv_layer *pshm_drv;
134	struct list_head *pos;
135	u32 avail_emptybuff = 0;
136	unsigned long flags = 0;
137
138	pshm_drv = priv;
139
140	/* Check for received buffers. */
141	if (mbx_msg & SHM_FULL_MASK) {
142		int idx;
143
144		spin_lock_irqsave(&pshm_drv->lock, flags);
145
146		/* Check whether we have any outstanding buffers. */
147		if (list_empty(&pshm_drv->rx_empty_list)) {
148
149			/* Release spin lock. */
150			spin_unlock_irqrestore(&pshm_drv->lock, flags);
151
152			/* We print even in IRQ context... */
153			pr_warn("No empty Rx buffers to fill: "
154					"mbx_msg:%x\n", mbx_msg);
155
156			/* Bail out. */
157			goto err_sync;
158		}
159
160		pbuf =
161			list_entry(pshm_drv->rx_empty_list.next,
162					struct buf_list, list);
163		idx = pbuf->index;
164
165		/* Check buffer synchronization. */
166		if (idx != SHM_GET_FULL(mbx_msg)) {
167
168			/* We print even in IRQ context... */
169			pr_warn(
170			"phyif_shm_mbx_msg_cb: RX full out of sync:"
171			" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
172				idx, mbx_msg, SHM_GET_FULL(mbx_msg));
173
174			spin_unlock_irqrestore(&pshm_drv->lock, flags);
175
176			/* Bail out. */
177			goto err_sync;
178		}
179
180		list_del_init(&pbuf->list);
181		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
182
183		spin_unlock_irqrestore(&pshm_drv->lock, flags);
184
185		/* Schedule RX work queue. */
186		if (!work_pending(&pshm_drv->shm_rx_work))
187			queue_work(pshm_drv->pshm_rx_workqueue,
188						&pshm_drv->shm_rx_work);
189	}
190
191	/* Check for emptied buffers. */
192	if (mbx_msg & SHM_EMPTY_MASK) {
193		int idx;
194
195		spin_lock_irqsave(&pshm_drv->lock, flags);
196
197		/* Check whether we have any outstanding buffers. */
198		if (list_empty(&pshm_drv->tx_full_list)) {
199
200			/* We print even in IRQ context... */
201			pr_warn("No TX to empty: msg:%x\n", mbx_msg);
202
203			spin_unlock_irqrestore(&pshm_drv->lock, flags);
204
205			/* Bail out. */
206			goto err_sync;
207		}
208
209		pbuf =
210			list_entry(pshm_drv->tx_full_list.next,
211					struct buf_list, list);
212		idx = pbuf->index;
213
214		/* Check buffer synchronization. */
215		if (idx != SHM_GET_EMPTY(mbx_msg)) {
216
217			spin_unlock_irqrestore(&pshm_drv->lock, flags);
218
219			/* We print even in IRQ context... */
220			pr_warn("TX empty "
221				"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
222
223			/* Bail out. */
224			goto err_sync;
225		}
226		list_del_init(&pbuf->list);
227
228		/* Reset buffer parameters. */
229		pbuf->frames = 0;
230		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
231
232		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
233
234		/* Check the available no. of buffers in the empty list */
235		list_for_each(pos, &pshm_drv->tx_empty_list)
236			avail_emptybuff++;
237
238		/* Check whether we have to wake up the transmitter. */
239		if ((avail_emptybuff > HIGH_WATERMARK) &&
240					(!pshm_drv->tx_empty_available)) {
241			pshm_drv->tx_empty_available = 1;
242			spin_unlock_irqrestore(&pshm_drv->lock, flags);
243			pshm_drv->cfdev.flowctrl
244					(pshm_drv->pshm_dev->pshm_netdev,
245								CAIF_FLOW_ON);
246
247
248			/* Schedule the work queue. if required */
249			if (!work_pending(&pshm_drv->shm_tx_work))
250				queue_work(pshm_drv->pshm_tx_workqueue,
251							&pshm_drv->shm_tx_work);
252		} else
253			spin_unlock_irqrestore(&pshm_drv->lock, flags);
254	}
255
256	return 0;
257
258err_sync:
259	return -EIO;
260}
261
262static void shm_rx_work_func(struct work_struct *rx_work)
263{
264	struct shmdrv_layer *pshm_drv;
265	struct buf_list *pbuf;
266	unsigned long flags = 0;
267	struct sk_buff *skb;
268	char *p;
269	int ret;
270
271	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
272
273	while (1) {
274
275		struct shm_pck_desc *pck_desc;
276
277		spin_lock_irqsave(&pshm_drv->lock, flags);
278
279		/* Check for received buffers. */
280		if (list_empty(&pshm_drv->rx_full_list)) {
281			spin_unlock_irqrestore(&pshm_drv->lock, flags);
282			break;
283		}
284
285		pbuf =
286			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
287					list);
288		list_del_init(&pbuf->list);
289		spin_unlock_irqrestore(&pshm_drv->lock, flags);
290
291		/* Retrieve pointer to start of the packet descriptor area. */
292		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
293
294		/*
295		 * Check whether descriptor contains a CAIF shared memory
296		 * frame.
297		 */
298		while (pck_desc->frm_ofs) {
299			unsigned int frm_buf_ofs;
300			unsigned int frm_pck_ofs;
301			unsigned int frm_pck_len;
302			/*
303			 * Check whether offset is within buffer limits
304			 * (lower).
305			 */
306			if (pck_desc->frm_ofs <
307				(pbuf->phy_addr - pshm_drv->shm_base_addr))
308				break;
309			/*
310			 * Check whether offset is within buffer limits
311			 * (higher).
312			 */
313			if (pck_desc->frm_ofs >
314				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
315					pbuf->len))
316				break;
317
318			/* Calculate offset from start of buffer. */
319			frm_buf_ofs =
320				pck_desc->frm_ofs - (pbuf->phy_addr -
321						pshm_drv->shm_base_addr);
322
323			/*
324			 * Calculate offset and length of CAIF packet while
325			 * taking care of the shared memory header.
326			 */
327			frm_pck_ofs =
328				frm_buf_ofs + SHM_HDR_LEN +
329				(*(pbuf->desc_vptr + frm_buf_ofs));
330			frm_pck_len =
331				(pck_desc->frm_len - SHM_HDR_LEN -
332				(*(pbuf->desc_vptr + frm_buf_ofs)));
333
334			/* Check whether CAIF packet is within buffer limits */
335			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
336				break;
337
338			/* Get a suitable CAIF packet and copy in data. */
339			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
340							frm_pck_len + 1);
341
342			if (skb == NULL) {
343				pr_info("OOM: Try next frame in descriptor\n");
344				break;
345			}
346
347			p = skb_put(skb, frm_pck_len);
348			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
349
350			skb->protocol = htons(ETH_P_CAIF);
351			skb_reset_mac_header(skb);
352			skb->dev = pshm_drv->pshm_dev->pshm_netdev;
353
354			/* Push received packet up the stack. */
355			ret = netif_rx_ni(skb);
356
357			if (!ret) {
358				pshm_drv->pshm_dev->pshm_netdev->stats.
359								rx_packets++;
360				pshm_drv->pshm_dev->pshm_netdev->stats.
361						rx_bytes += pck_desc->frm_len;
362			} else
363				++pshm_drv->pshm_dev->pshm_netdev->stats.
364								rx_dropped;
365			/* Move to next packet descriptor. */
366			pck_desc++;
367		}
368
369		spin_lock_irqsave(&pshm_drv->lock, flags);
370		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
371
372		spin_unlock_irqrestore(&pshm_drv->lock, flags);
373
374	}
375
376	/* Schedule the work queue. if required */
377	if (!work_pending(&pshm_drv->shm_tx_work))
378		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
379
380}
381
382static void shm_tx_work_func(struct work_struct *tx_work)
383{
384	u32 mbox_msg;
385	unsigned int frmlen, avail_emptybuff, append = 0;
386	unsigned long flags = 0;
387	struct buf_list *pbuf = NULL;
388	struct shmdrv_layer *pshm_drv;
389	struct shm_caif_frm *frm;
390	struct sk_buff *skb;
391	struct shm_pck_desc *pck_desc;
392	struct list_head *pos;
393
394	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
395
396	do {
397		/* Initialize mailbox message. */
398		mbox_msg = 0x00;
399		avail_emptybuff = 0;
400
401		spin_lock_irqsave(&pshm_drv->lock, flags);
402
403		/* Check for pending receive buffers. */
404		if (!list_empty(&pshm_drv->rx_pend_list)) {
405
406			pbuf = list_entry(pshm_drv->rx_pend_list.next,
407						struct buf_list, list);
408
409			list_del_init(&pbuf->list);
410			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
411			/*
412			 * Value index is never changed,
413			 * so read access should be safe.
414			 */
415			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
416		}
417
418		skb = skb_peek(&pshm_drv->sk_qhead);
419
420		if (skb == NULL)
421			goto send_msg;
422		/* Check the available no. of buffers in the empty list */
423		list_for_each(pos, &pshm_drv->tx_empty_list)
424			avail_emptybuff++;
425
426		if ((avail_emptybuff < LOW_WATERMARK) &&
427					pshm_drv->tx_empty_available) {
428			/* Update blocking condition. */
429			pshm_drv->tx_empty_available = 0;
430			spin_unlock_irqrestore(&pshm_drv->lock, flags);
431			pshm_drv->cfdev.flowctrl
432					(pshm_drv->pshm_dev->pshm_netdev,
433					CAIF_FLOW_OFF);
434			spin_lock_irqsave(&pshm_drv->lock, flags);
435		}
436		/*
437		 * We simply return back to the caller if we do not have space
438		 * either in Tx pending list or Tx empty list. In this case,
439		 * we hold the received skb in the skb list, waiting to
440		 * be transmitted once Tx buffers become available
441		 */
442		if (list_empty(&pshm_drv->tx_empty_list))
443			goto send_msg;
444
445		/* Get the first free Tx buffer. */
446		pbuf = list_entry(pshm_drv->tx_empty_list.next,
447						struct buf_list, list);
448		do {
449			if (append) {
450				skb = skb_peek(&pshm_drv->sk_qhead);
451				if (skb == NULL)
452					break;
453			}
454
455			frm = (struct shm_caif_frm *)
456					(pbuf->desc_vptr + pbuf->frm_ofs);
457
458			frm->hdr_ofs = 0;
459			frmlen = 0;
460			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
461
462			/* Add tail padding if needed. */
463			if (frmlen % SHM_FRM_PAD_LEN)
464				frmlen += SHM_FRM_PAD_LEN -
465						(frmlen % SHM_FRM_PAD_LEN);
466
467			/*
468			 * Verify that packet, header and additional padding
469			 * can fit within the buffer frame area.
470			 */
471			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
472				break;
473
474			if (!append) {
475				list_del_init(&pbuf->list);
476				append = 1;
477			}
478
479			skb = skb_dequeue(&pshm_drv->sk_qhead);
480			if (skb == NULL)
481				break;
482			/* Copy in CAIF frame. */
483			skb_copy_bits(skb, 0, pbuf->desc_vptr +
484					pbuf->frm_ofs + SHM_HDR_LEN +
485						frm->hdr_ofs, skb->len);
486
487			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
488			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
489									frmlen;
490			dev_kfree_skb_irq(skb);
491
492			/* Fill in the shared memory packet descriptor area. */
493			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
494			/* Forward to current frame. */
495			pck_desc += pbuf->frames;
496			pck_desc->frm_ofs = (pbuf->phy_addr -
497						pshm_drv->shm_base_addr) +
498								pbuf->frm_ofs;
499			pck_desc->frm_len = frmlen;
500			/* Terminate packet descriptor area. */
501			pck_desc++;
502			pck_desc->frm_ofs = 0;
503			/* Update buffer parameters. */
504			pbuf->frames++;
505			pbuf->frm_ofs += frmlen + (frmlen % 32);
506
507		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
508
509		/* Assign buffer as full. */
510		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
511		append = 0;
512		mbox_msg |= SHM_SET_FULL(pbuf->index);
513send_msg:
514		spin_unlock_irqrestore(&pshm_drv->lock, flags);
515
516		if (mbox_msg)
517			pshm_drv->pshm_dev->pshmdev_mbxsend
518					(pshm_drv->pshm_dev->shm_id, mbox_msg);
519	} while (mbox_msg);
520}
521
522static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
523{
524	struct shmdrv_layer *pshm_drv;
525
526	pshm_drv = netdev_priv(shm_netdev);
527
528	skb_queue_tail(&pshm_drv->sk_qhead, skb);
529
530	/* Schedule Tx work queue. for deferred processing of skbs*/
531	if (!work_pending(&pshm_drv->shm_tx_work))
532		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
533
534	return 0;
535}
536
537static const struct net_device_ops netdev_ops = {
538	.ndo_open = shm_netdev_open,
539	.ndo_stop = shm_netdev_close,
540	.ndo_start_xmit = shm_netdev_tx,
541};
542
543static void shm_netdev_setup(struct net_device *pshm_netdev)
544{
545	struct shmdrv_layer *pshm_drv;
546	pshm_netdev->netdev_ops = &netdev_ops;
547
548	pshm_netdev->mtu = CAIF_MAX_MTU;
549	pshm_netdev->type = ARPHRD_CAIF;
550	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
551	pshm_netdev->tx_queue_len = 0;
552	pshm_netdev->destructor = free_netdev;
553
554	pshm_drv = netdev_priv(pshm_netdev);
555
556	/* Initialize structures in a clean state. */
557	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
558
559	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
560}
561
562int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
563{
564	int result, j;
565	struct shmdrv_layer *pshm_drv = NULL;
566
567	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
568						"cfshm%d", shm_netdev_setup);
569	if (!pshm_dev->pshm_netdev)
570		return -ENOMEM;
571
572	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
573	pshm_drv->pshm_dev = pshm_dev;
574
575	/*
576	 * Initialization starts with the verification of the
577	 * availability of MBX driver by calling its setup function.
578	 * MBX driver must be available by this time for proper
579	 * functioning of SHM driver.
580	 */
581	if ((pshm_dev->pshmdev_mbxsetup
582				(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
583		pr_warn("Could not config. SHM Mailbox,"
584				" Bailing out.....\n");
585		free_netdev(pshm_dev->pshm_netdev);
586		return -ENODEV;
587	}
588
589	skb_queue_head_init(&pshm_drv->sk_qhead);
590
591	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
592			" INSTANCE AT pshm_drv =0x%p\n",
593			pshm_drv->pshm_dev->shm_id, pshm_drv);
594
595	if (pshm_dev->shm_total_sz <
596			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
597
598		pr_warn("ERROR, Amount of available"
599				" Phys. SHM cannot accommodate current SHM "
600				"driver configuration, Bailing out ...\n");
601		free_netdev(pshm_dev->pshm_netdev);
602		return -ENOMEM;
603	}
604
605	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
606	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
607
608	if (pshm_dev->shm_loopback)
609		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
610	else
611		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
612						(NR_TX_BUF * TX_BUF_SZ);
613
614	spin_lock_init(&pshm_drv->lock);
615	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
616	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
617	INIT_LIST_HEAD(&pshm_drv->tx_full_list);
618
619	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
620	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
621	INIT_LIST_HEAD(&pshm_drv->rx_full_list);
622
623	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
624	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
625
626	pshm_drv->pshm_tx_workqueue =
627				create_singlethread_workqueue("shm_tx_work");
628	pshm_drv->pshm_rx_workqueue =
629				create_singlethread_workqueue("shm_rx_work");
630
631	for (j = 0; j < NR_TX_BUF; j++) {
632		struct buf_list *tx_buf =
633				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
634
635		if (tx_buf == NULL) {
636			pr_warn("ERROR, Could not"
637					" allocate dynamic mem. for tx_buf,"
638					" Bailing out ...\n");
639			free_netdev(pshm_dev->pshm_netdev);
640			return -ENOMEM;
641		}
642		tx_buf->index = j;
643		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
644		tx_buf->len = TX_BUF_SZ;
645		tx_buf->frames = 0;
646		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
647
648		if (pshm_dev->shm_loopback)
649			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
650		else
651			/*
652			 * FIXME: the result of ioremap is not a pointer - arnd
653			 */
654			tx_buf->desc_vptr =
655					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
656
657		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
658	}
659
660	for (j = 0; j < NR_RX_BUF; j++) {
661		struct buf_list *rx_buf =
662				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
663
664		if (rx_buf == NULL) {
665			pr_warn("ERROR, Could not"
666					" allocate dynamic mem.for rx_buf,"
667					" Bailing out ...\n");
668			free_netdev(pshm_dev->pshm_netdev);
669			return -ENOMEM;
670		}
671		rx_buf->index = j;
672		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
673		rx_buf->len = RX_BUF_SZ;
674
675		if (pshm_dev->shm_loopback)
676			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
677		else
678			rx_buf->desc_vptr =
679					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
680		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
681	}
682
683	pshm_drv->tx_empty_available = 1;
684	result = register_netdev(pshm_dev->pshm_netdev);
685	if (result)
686		pr_warn("ERROR[%d], SHM could not, "
687			"register with NW FRMWK Bailing out ...\n", result);
688
689	return result;
690}
691
692void caif_shmcore_remove(struct net_device *pshm_netdev)
693{
694	struct buf_list *pbuf;
695	struct shmdrv_layer *pshm_drv = NULL;
696
697	pshm_drv = netdev_priv(pshm_netdev);
698
699	while (!(list_empty(&pshm_drv->tx_pend_list))) {
700		pbuf =
701			list_entry(pshm_drv->tx_pend_list.next,
702					struct buf_list, list);
703
704		list_del(&pbuf->list);
705		kfree(pbuf);
706	}
707
708	while (!(list_empty(&pshm_drv->tx_full_list))) {
709		pbuf =
710			list_entry(pshm_drv->tx_full_list.next,
711					struct buf_list, list);
712		list_del(&pbuf->list);
713		kfree(pbuf);
714	}
715
716	while (!(list_empty(&pshm_drv->tx_empty_list))) {
717		pbuf =
718			list_entry(pshm_drv->tx_empty_list.next,
719					struct buf_list, list);
720		list_del(&pbuf->list);
721		kfree(pbuf);
722	}
723
724	while (!(list_empty(&pshm_drv->rx_full_list))) {
725		pbuf =
726			list_entry(pshm_drv->tx_full_list.next,
727				struct buf_list, list);
728		list_del(&pbuf->list);
729		kfree(pbuf);
730	}
731
732	while (!(list_empty(&pshm_drv->rx_pend_list))) {
733		pbuf =
734			list_entry(pshm_drv->tx_pend_list.next,
735				struct buf_list, list);
736		list_del(&pbuf->list);
737		kfree(pbuf);
738	}
739
740	while (!(list_empty(&pshm_drv->rx_empty_list))) {
741		pbuf =
742			list_entry(pshm_drv->rx_empty_list.next,
743				struct buf_list, list);
744		list_del(&pbuf->list);
745		kfree(pbuf);
746	}
747
748	/* Destroy work queues. */
749	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
750	destroy_workqueue(pshm_drv->pshm_rx_workqueue);
751
752	unregister_netdev(pshm_netdev);
753}
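
Both versions above lay each 8 KiB shared-memory buffer out as a descriptor table followed by a frame area: up to SHM_MAX_FRMS_PER_BUF shm_pck_desc entries plus one zeroed terminator entry, then the CAIF frames starting at SHM_CAIF_FRM_OFS, each preceded by a one-byte header whose value (hdr_ofs) gives the amount of padding inserted before the frame. The standalone sketch below builds such a buffer in ordinary memory and then walks it the way shm_rx_work_func() does. The add_frame() helper, the payload strings and the use of buffer-relative offsets are illustrative assumptions; the driver itself stores offsets relative to the whole shared-memory area and, in the non-loopback case, writes through ioremap()ed mappings.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SHM_MAX_FRMS_PER_BUF	10
#define SHM_HDR_LEN		1
#define SHM_FRM_PAD_LEN		4
#define BUF_SZ			0x2000	/* same size as TX_BUF_SZ/RX_BUF_SZ */

struct shm_pck_desc {
	uint32_t frm_ofs;	/* here: offset from the start of this buffer */
	uint32_t frm_len;	/* header + payload + tail padding */
};

/* Descriptor area, including the zeroed end-of-descriptor entry. */
#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
					sizeof(struct shm_pck_desc))
/* First frame starts after the descriptor area, as in the driver. */
#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))

/* Hypothetical helper: append one frame the way shm_tx_work_func() does. */
static unsigned int add_frame(unsigned char *buf, struct shm_pck_desc *desc,
			      unsigned int frm_ofs, const char *payload)
{
	unsigned int len = SHM_HDR_LEN + 0 /* hdr_ofs */ + strlen(payload);

	if (len % SHM_FRM_PAD_LEN)
		len += SHM_FRM_PAD_LEN - (len % SHM_FRM_PAD_LEN);

	buf[frm_ofs] = 0;			/* hdr_ofs byte: no pre-padding */
	memcpy(buf + frm_ofs + SHM_HDR_LEN, payload, strlen(payload));

	desc->frm_ofs = frm_ofs;	/* the driver adds phy_addr - base here */
	desc->frm_len = len;

	return frm_ofs + len + (len % 32);	/* same advance as the driver */
}

int main(void)
{
	static unsigned char buf[BUF_SZ];
	struct shm_pck_desc *desc = (struct shm_pck_desc *)buf;
	unsigned int ofs = SHM_CAIF_FRM_OFS;

	ofs = add_frame(buf, &desc[0], ofs, "hello");
	ofs = add_frame(buf, &desc[1], ofs, "caif");
	desc[2].frm_ofs = 0;			/* terminate the descriptor area */

	/* Walk the descriptors the way shm_rx_work_func() does. */
	for (desc = (struct shm_pck_desc *)buf; desc->frm_ofs; desc++) {
		unsigned int pck_ofs = desc->frm_ofs + SHM_HDR_LEN +
					buf[desc->frm_ofs];
		unsigned int pck_len = desc->frm_len - SHM_HDR_LEN -
					buf[desc->frm_ofs];

		printf("frame at %u, %u bytes: %.*s\n",
		       (unsigned)desc->frm_ofs, pck_len, (int)pck_len,
		       (char *)buf + pck_ofs);
	}
	return 0;
}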