v4.17

   1/* Freescale QUICC Engine HDLC Device Driver
   2 *
   3 * Copyright 2016 Freescale Semiconductor Inc.
   4 *
   5 * This program is free software; you can redistribute  it and/or modify it
   6 * under  the terms of  the GNU General  Public License as published by the
   7 * Free Software Foundation;  either version 2 of the  License, or (at your
   8 * option) any later version.
   9 */
  10
  11#include <linux/delay.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/hdlc.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/irq.h>
  18#include <linux/kernel.h>
  19#include <linux/module.h>
  20#include <linux/netdevice.h>
  21#include <linux/of_address.h>
  22#include <linux/of_irq.h>
  23#include <linux/of_platform.h>
  24#include <linux/platform_device.h>
  25#include <linux/sched.h>
  26#include <linux/skbuff.h>
  27#include <linux/slab.h>
  28#include <linux/spinlock.h>
  29#include <linux/stddef.h>
  30#include <soc/fsl/qe/qe_tdm.h>
  31#include <uapi/linux/if_arp.h>
  32
  33#include "fsl_ucc_hdlc.h"
  34
  35#define DRV_DESC "Freescale QE UCC HDLC Driver"
  36#define DRV_NAME "ucc_hdlc"
  37
  38#define TDM_PPPOHT_SLIC_MAXIN
  39
  40static struct ucc_tdm_info utdm_primary_info = {
  41	.uf_info = {
  42		.tsa = 0,
  43		.cdp = 0,
  44		.cds = 1,
  45		.ctsp = 1,
  46		.ctss = 1,
  47		.revd = 0,
  48		.urfs = 256,
  49		.utfs = 256,
  50		.urfet = 128,
  51		.urfset = 192,
  52		.utfet = 128,
  53		.utftt = 0x40,
  54		.ufpt = 256,
  55		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
  56		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
  57		.tenc = UCC_FAST_TX_ENCODING_NRZ,
  58		.renc = UCC_FAST_RX_ENCODING_NRZ,
  59		.tcrc = UCC_FAST_16_BIT_CRC,
  60		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
  61	},
  62
  63	.si_info = {
  64#ifdef TDM_PPPOHT_SLIC_MAXIN
  65		.simr_rfsd = 1,
  66		.simr_tfsd = 2,
  67#else
  68		.simr_rfsd = 0,
  69		.simr_tfsd = 0,
  70#endif
  71		.simr_crt = 0,
  72		.simr_sl = 0,
  73		.simr_ce = 1,
  74		.simr_fe = 1,
  75		.simr_gm = 0,
  76	},
  77};
  78
  79static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
  80
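Note: utdm_primary_info above is a build-time template. ucc_hdlc_probe() (further down) memcpy()s it into the utdm_info[] slot for each UCC instance and then overrides per-device fields such as ucc_num. A minimal sketch of that copy-then-customize pattern, with illustrative names (struct cfg, cfg_template, cfg_table are stand-ins, not this driver's types):

#include <stdio.h>
#include <string.h>

struct cfg { int ucc_num; int tsa; };

/* shared defaults, analogous to utdm_primary_info */
static const struct cfg cfg_template = { .ucc_num = -1, .tsa = 0 };
/* one private copy per instance, analogous to utdm_info[] */
static struct cfg cfg_table[4];

static struct cfg *cfg_for_instance(int idx)
{
	/* copy the shared defaults, then customize per device */
	memcpy(&cfg_table[idx], &cfg_template, sizeof(cfg_template));
	cfg_table[idx].ucc_num = idx;
	return &cfg_table[idx];
}

int main(void)
{
	printf("instance 2 ucc_num = %d\n", cfg_for_instance(2)->ucc_num);
	return 0;
}

Because each instance works on its own copy, per-device tweaks made in probe never leak back into the shared defaults.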
  81static int uhdlc_init(struct ucc_hdlc_private *priv)
  82{
  83	struct ucc_tdm_info *ut_info;
  84	struct ucc_fast_info *uf_info;
  85	u32 cecr_subblock;
  86	u16 bd_status;
  87	int ret, i;
  88	void *bd_buffer;
  89	dma_addr_t bd_dma_addr;
  90	u32 riptr;
  91	u32 tiptr;
  92	u32 gumr;
  93
  94	ut_info = priv->ut_info;
  95	uf_info = &ut_info->uf_info;
  96
  97	if (priv->tsa) {
  98		uf_info->tsa = 1;
  99		uf_info->ctsp = 1;
 100	}
 101
 102	/* This sets the HPM field in the CMXUCR register, which configures an
 103	 * open-drain connected HDLC bus
 104	 */
 105	if (priv->hdlc_bus)
 106		uf_info->brkpt_support = 1;
 107
 108	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
 109				UCC_HDLC_UCCE_TXB) << 16);
 110
 111	ret = ucc_fast_init(uf_info, &priv->uccf);
 112	if (ret) {
 113		dev_err(priv->dev, "Failed to init uccf.");
 114		return ret;
 115	}
 116
 117	priv->uf_regs = priv->uccf->uf_regs;
 118	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 119
 120	/* Loopback mode */
 121	if (priv->loopback) {
 122		dev_info(priv->dev, "Loopback Mode\n");
 123		/* use the same clock when working in loopback */
 124		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
 125
 126		gumr = ioread32be(&priv->uf_regs->gumr);
 127		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
 128			 UCC_FAST_GUMR_TCI);
 129		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
 130		iowrite32be(gumr, &priv->uf_regs->gumr);
 131	}
 132
 133	/* Initialize SI */
 134	if (priv->tsa)
 135		ucc_tdm_init(priv->utdm, priv->ut_info);
 136
 137	/* Write to QE CECR, UCCx channel to Stop Transmission */
 138	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 139	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
 140			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
 141
 142	/* Set UPSMR normal mode (needs fixing) */
 143	iowrite32be(0, &priv->uf_regs->upsmr);
 144
 145	/* hdlc_bus mode */
 146	if (priv->hdlc_bus) {
 147		u32 upsmr;
 148
 149		dev_info(priv->dev, "HDLC bus Mode\n");
 150		upsmr = ioread32be(&priv->uf_regs->upsmr);
 151
 152		/* bus mode and retransmit enable, with collision window
 153		 * set to 8 bytes
 154		 */
 155		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
 156				UCC_HDLC_UPSMR_CW8;
 157		iowrite32be(upsmr, &priv->uf_regs->upsmr);
 158
 159		/* explicitly disable CDS & CTSP */
 160		gumr = ioread32be(&priv->uf_regs->gumr);
 161		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
 162		/* set automatic sync to explicitly ignore CD signal */
 163		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
 164		iowrite32be(gumr, &priv->uf_regs->gumr);
 165	}
 166
 167	priv->rx_ring_size = RX_BD_RING_LEN;
 168	priv->tx_ring_size = TX_BD_RING_LEN;
 169	/* Alloc Rx BD */
 170	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
 171			RX_BD_RING_LEN * sizeof(struct qe_bd),
 172			&priv->dma_rx_bd, GFP_KERNEL);
 173
 174	if (!priv->rx_bd_base) {
 175		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
 176		ret = -ENOMEM;
 177		goto free_uccf;
 178	}
 179
 180	/* Alloc Tx BD */
 181	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
 182			TX_BD_RING_LEN * sizeof(struct qe_bd),
 183			&priv->dma_tx_bd, GFP_KERNEL);
 184
 185	if (!priv->tx_bd_base) {
 186		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
 187		ret = -ENOMEM;
 188		goto free_rx_bd;
 189	}
 190
 191	/* Alloc parameter ram for ucc hdlc */
 192	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 193				ALIGNMENT_OF_UCC_HDLC_PRAM);
 194
 195	if (priv->ucc_pram_offset < 0) {
 196		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
 197		ret = -ENOMEM;
 198		goto free_tx_bd;
 199	}
 200
 201	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
 202				  GFP_KERNEL);
 203	if (!priv->rx_skbuff)
 204		goto free_ucc_pram;
 205
 206	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
 207				  GFP_KERNEL);
 208	if (!priv->tx_skbuff)
 209		goto free_rx_skbuff;
 210
 211	priv->skb_curtx = 0;
 212	priv->skb_dirtytx = 0;
 213	priv->curtx_bd = priv->tx_bd_base;
 214	priv->dirty_tx = priv->tx_bd_base;
 215	priv->currx_bd = priv->rx_bd_base;
 216	priv->currx_bdnum = 0;
 217
 218	/* init parameter base */
 219	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 220	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
 221			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 222
 223	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 224					qe_muram_addr(priv->ucc_pram_offset);
 225
 226	/* Zero out parameter ram */
 227	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
 228
 229	/* Alloc riptr, tiptr */
 230	riptr = qe_muram_alloc(32, 32);
 231	if (riptr < 0) {
 232		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
 233		ret = -ENOMEM;
 234		goto free_tx_skbuff;
 235	}
 236
 237	tiptr = qe_muram_alloc(32, 32);
 238	if (tiptr < 0) {
 239		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
 240		ret = -ENOMEM;
 241		goto free_riptr;
 242	}
 243
 244	/* Set RIPTR, TIPTR */
 245	iowrite16be(riptr, &priv->ucc_pram->riptr);
 246	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
 247
 248	/* Set MRBLR */
 249	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
 250
 251	/* Set RBASE, TBASE */
 252	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
 253	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
 254
 255	/* Set RSTATE, TSTATE */
 256	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
 257	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
 258
 259	/* Set C_MASK, C_PRES for 16bit CRC */
 260	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
 261	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
 262
 263	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
 264	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
 265	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
 266	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
 267	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
 268	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
 269	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
 270	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
 271
 272	/* Get BD buffer */
 273	bd_buffer = dma_alloc_coherent(priv->dev,
 274				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
 275				       MAX_RX_BUF_LENGTH,
 276				       &bd_dma_addr, GFP_KERNEL);
 277
 278	if (!bd_buffer) {
 279		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
 280		ret = -ENOMEM;
 281		goto free_tiptr;
 282	}
 283
 284	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
 285			* MAX_RX_BUF_LENGTH);
 286
 287	priv->rx_buffer = bd_buffer;
 288	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
 289
 290	priv->dma_rx_addr = bd_dma_addr;
 291	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
 292
 293	for (i = 0; i < RX_BD_RING_LEN; i++) {
 294		if (i < (RX_BD_RING_LEN - 1))
 295			bd_status = R_E_S | R_I_S;
 296		else
 297			bd_status = R_E_S | R_I_S | R_W_S;
 298
 299		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
 300		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
 301			    &priv->rx_bd_base[i].buf);
 302	}
 303
 304	for (i = 0; i < TX_BD_RING_LEN; i++) {
 305		if (i < (TX_BD_RING_LEN - 1))
 306			bd_status =  T_I_S | T_TC_S;
 307		else
 308			bd_status =  T_I_S | T_TC_S | T_W_S;
 309
 310		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
 311		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
 312			    &priv->tx_bd_base[i].buf);
 313	}
 314
 315	return 0;
 316
 317free_tiptr:
 318	qe_muram_free(tiptr);
 319free_riptr:
 320	qe_muram_free(riptr);
 321free_tx_skbuff:
 322	kfree(priv->tx_skbuff);
 323free_rx_skbuff:
 324	kfree(priv->rx_skbuff);
 325free_ucc_pram:
 326	qe_muram_free(priv->ucc_pram_offset);
 327free_tx_bd:
 328	dma_free_coherent(priv->dev,
 329			  TX_BD_RING_LEN * sizeof(struct qe_bd),
 330			  priv->tx_bd_base, priv->dma_tx_bd);
 331free_rx_bd:
 332	dma_free_coherent(priv->dev,
 333			  RX_BD_RING_LEN * sizeof(struct qe_bd),
 334			  priv->rx_bd_base, priv->dma_rx_bd);
 335free_uccf:
 336	ucc_fast_free(priv->uccf);
 337
 338	return ret;
 339}
 340
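Note: uhdlc_init() backs both rings with a single dma_alloc_coherent() region: RX buffers first, then TX buffers, MAX_RX_BUF_LENGTH bytes per descriptor, and BD i is pointed at base + i * MAX_RX_BUF_LENGTH. A hedged user-space sketch of the same carve-up arithmetic (ring sizes and buffer length here are illustrative, not the driver's RX_BD_RING_LEN/TX_BD_RING_LEN values):

#include <assert.h>
#include <stdint.h>

#define NUM_RX  32		/* illustrative */
#define NUM_TX  32		/* illustrative */
#define BUF_LEN 1600		/* stands in for MAX_RX_BUF_LENGTH */

/* one contiguous pool: RX area, then TX area */
static uint8_t pool[(NUM_RX + NUM_TX) * BUF_LEN];

static uint8_t *rx_buf(int i) { return pool + i * BUF_LEN; }
static uint8_t *tx_buf(int i) { return pool + (NUM_RX + i) * BUF_LEN; }

int main(void)
{
	/* the TX area starts immediately after the last RX buffer */
	assert(tx_buf(0) == rx_buf(NUM_RX - 1) + BUF_LEN);
	return 0;
}

The driver applies the identical split to the DMA side: dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH.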
 341static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 342{
 343	hdlc_device *hdlc = dev_to_hdlc(dev);
 344	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
 345	struct qe_bd __iomem *bd;
 346	u16 bd_status;
 347	unsigned long flags;
 348	u16 *proto_head;
 349
 350	switch (dev->type) {
 351	case ARPHRD_RAWHDLC:
 352		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
 353			dev->stats.tx_dropped++;
 354			dev_kfree_skb(skb);
 355			netdev_err(dev, "Not enough space for hdlc head\n");
 356			return -ENOMEM;
 357		}
 358
 359		skb_push(skb, HDLC_HEAD_LEN);
 360
 361		proto_head = (u16 *)skb->data;
 362		*proto_head = htons(DEFAULT_HDLC_HEAD);
 363
 364		dev->stats.tx_bytes += skb->len;
 365		break;
 366
 367	case ARPHRD_PPP:
 368		proto_head = (u16 *)skb->data;
 369		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
 370			dev->stats.tx_dropped++;
 371			dev_kfree_skb(skb);
 372			netdev_err(dev, "Wrong ppp header\n");
 373			return -ENOMEM;
 374		}
 375
 376		dev->stats.tx_bytes += skb->len;
 377		break;
 378
 379	default:
 380		dev->stats.tx_dropped++;
 381		dev_kfree_skb(skb);
 382		return -ENOMEM;
 383	}
 384	spin_lock_irqsave(&priv->lock, flags);
 385
 386	/* Start from the next BD that should be filled */
 387	bd = priv->curtx_bd;
 388	bd_status = ioread16be(&bd->status);
 389	/* Save the skb pointer so we can free it later */
 390	priv->tx_skbuff[priv->skb_curtx] = skb;
 391
 392	/* Update the current skb pointer (wrapping if this was the last) */
 393	priv->skb_curtx =
 394	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
 395
 396	/* copy skb data to tx buffer for sdma processing */
 397	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 398	       skb->data, skb->len);
 399
 400	/* set bd status and length */
 401	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 402
 403	iowrite16be(skb->len, &bd->length);
 404	iowrite16be(bd_status, &bd->status);
 405
 406	/* Move to next BD in the ring */
 407	if (!(bd_status & T_W_S))
 408		bd += 1;
 409	else
 410		bd = priv->tx_bd_base;
 411
 412	if (bd == priv->dirty_tx) {
 413		if (!netif_queue_stopped(dev))
 414			netif_stop_queue(dev);
 415	}
 416
 417	priv->curtx_bd = bd;
 418
 419	spin_unlock_irqrestore(&priv->lock, flags);
 420
 421	return NETDEV_TX_OK;
 422}
 423
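Note: ucc_hdlc_tx() advances skb_curtx with (i + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN), the standard power-of-two ring trick: when the length is a power of two, masking with length - 1 is equivalent to a modulo and never branches. A self-checking sketch (assuming, as the mask usage implies, a power-of-two ring length):

#include <assert.h>

#define RING_LEN 16			/* must be a power of two */
#define RING_MOD_MASK(len) ((len) - 1)	/* mirrors TX_RING_MOD_MASK */

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4 * RING_LEN; i++)
		assert(((i + 1) & RING_MOD_MASK(RING_LEN)) ==
		       (i + 1) % RING_LEN);	/* mask == modulo */
	return 0;
}

The BD pointer itself wraps differently: the hardware wrap bit (T_W_S) marks the last descriptor, so the code resets bd to tx_bd_base when that bit is seen.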
 424static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 425{
 426	/* Start from the next BD that should be filled */
 427	struct net_device *dev = priv->ndev;
 428	struct qe_bd *bd;		/* BD pointer */
 429	u16 bd_status;
 430
 431	bd = priv->dirty_tx;
 432	bd_status = ioread16be(&bd->status);
 433
 434	/* Normal processing. */
 435	while ((bd_status & T_R_S) == 0) {
 436		struct sk_buff *skb;
 437
 438		/* BD contains already transmitted buffer.   */
 439		/* Handle the transmitted buffer and release */
 440		/* the BD to be used with the current frame  */
 441
 442		skb = priv->tx_skbuff[priv->skb_dirtytx];
 443		if (!skb)
 444			break;
 445		dev->stats.tx_packets++;
 446		memset(priv->tx_buffer +
 447		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 448		       0, skb->len);
 449		dev_kfree_skb_irq(skb);
 450
 451		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 452		priv->skb_dirtytx =
 453		    (priv->skb_dirtytx +
 454		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
 455
 456		/* We freed a buffer, so now we can restart transmission */
 457		if (netif_queue_stopped(dev))
 458			netif_wake_queue(dev);
 459
 460		/* Advance the confirmation BD pointer */
 461		if (!(bd_status & T_W_S))
 462			bd += 1;
 463		else
 464			bd = priv->tx_bd_base;
 465		bd_status = ioread16be(&bd->status);
 466	}
 467	priv->dirty_tx = bd;
 468
 469	return 0;
 470}
 471
 472static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 473{
 474	struct net_device *dev = priv->ndev;
 475	struct sk_buff *skb = NULL;
 476	hdlc_device *hdlc = dev_to_hdlc(dev);
 477	struct qe_bd *bd;
 478	u16 bd_status;
 479	u16 length, howmany = 0;
 480	u8 *bdbuffer;
 481
 482	bd = priv->currx_bd;
 483	bd_status = ioread16be(&bd->status);
 484
 485	/* while there are received buffers and BD is full (~R_E) */
 486	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
 487		if (bd_status & R_OV_S)
 488			dev->stats.rx_over_errors++;
 489		if (bd_status & R_CR_S) {
 490			dev->stats.rx_crc_errors++;
 491			dev->stats.rx_dropped++;
 492			goto recycle;
 493		}
 494		bdbuffer = priv->rx_buffer +
 495			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
 496		length = ioread16be(&bd->length);
 497
 498		switch (dev->type) {
 499		case ARPHRD_RAWHDLC:
 500			bdbuffer += HDLC_HEAD_LEN;
 501			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
 502
 503			skb = dev_alloc_skb(length);
 504			if (!skb) {
 505				dev->stats.rx_dropped++;
 506				return -ENOMEM;
 507			}
 508
 509			skb_put(skb, length);
 510			skb->len = length;
 511			skb->dev = dev;
 512			memcpy(skb->data, bdbuffer, length);
 513			break;
 514
 515		case ARPHRD_PPP:
 516			length -= HDLC_CRC_SIZE;
 517
 518			skb = dev_alloc_skb(length);
 519			if (!skb) {
 520				dev->stats.rx_dropped++;
 521				return -ENOMEM;
 522			}
 523
 524			skb_put(skb, length);
 525			skb->len = length;
 526			skb->dev = dev;
 527			memcpy(skb->data, bdbuffer, length);
 528			break;
 529		}
 530
 531		dev->stats.rx_packets++;
 532		dev->stats.rx_bytes += skb->len;
 533		howmany++;
 534		if (hdlc->proto)
 535			skb->protocol = hdlc_type_trans(skb, dev);
 536		netif_receive_skb(skb);
 537
 538recycle:
 539		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
 540
 541		/* update to point at the next bd */
 542		if (bd_status & R_W_S) {
 543			priv->currx_bdnum = 0;
 544			bd = priv->rx_bd_base;
 545		} else {
 546			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
 547				priv->currx_bdnum += 1;
 548			else
 549				priv->currx_bdnum = RX_BD_RING_LEN - 1;
 550
 551			bd += 1;
 552		}
 553
 554		bd_status = ioread16be(&bd->status);
 555	}
 556
 557	priv->currx_bd = bd;
 558	return howmany;
 559}
 560
 561static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
 562{
 563	struct ucc_hdlc_private *priv = container_of(napi,
 564						     struct ucc_hdlc_private,
 565						     napi);
 566	int howmany;
 567
 568	/* Tx event processing */
 569	spin_lock(&priv->lock);
 570	hdlc_tx_done(priv);
 571	spin_unlock(&priv->lock);
 572
 573	howmany = 0;
 574	howmany += hdlc_rx_done(priv, budget - howmany);
 575
 576	if (howmany < budget) {
 577		napi_complete_done(napi, howmany);
 578		qe_setbits32(priv->uccf->p_uccm,
 579			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
 580	}
 581
 582	return howmany;
 583}
 584
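Note: ucc_hdlc_poll() and ucc_hdlc_irq_handler() (below) implement the usual NAPI handshake: the interrupt handler masks the RX/TX event bits and schedules the poller, and the poller unmasks them only after finishing under budget, so the device stays interrupt-driven at low load and polled under pressure. A driver-agnostic sketch of that contract (every name here is a stand-in, not a kernel API):

#include <stdbool.h>
#include <stdio.h>

struct fake_dev { bool events_masked; bool poll_scheduled; };

static bool ok_to_schedule(struct fake_dev *d) { return !d->poll_scheduled; }
static void mask_events(struct fake_dev *d)    { d->events_masked = true; }
static void unmask_events(struct fake_dev *d)  { d->events_masked = false; }
static void schedule_poll(struct fake_dev *d)  { d->poll_scheduled = true; }
static void poll_done(struct fake_dev *d)      { d->poll_scheduled = false; }
static int  process_rx(struct fake_dev *d, int budget) { (void)d; return budget / 2; }

/* IRQ side: quiet the source, defer the work */
static void irq_handler(struct fake_dev *d)
{
	if (ok_to_schedule(d)) {	/* cf. napi_schedule_prep() */
		mask_events(d);		/* cf. clearing uccm bits */
		schedule_poll(d);	/* cf. __napi_schedule() */
	}
}

/* poll side: re-arm only when the ring is drained under budget */
static int poll(struct fake_dev *d, int budget)
{
	int done = process_rx(d, budget);

	if (done < budget) {
		poll_done(d);		/* cf. napi_complete_done() */
		unmask_events(d);	/* cf. qe_setbits32(p_uccm, ...) */
	}
	return done;
}

int main(void)
{
	struct fake_dev d = { 0 };

	irq_handler(&d);
	printf("processed %d\n", poll(&d, 64));
	return 0;
}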
 585static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
 586{
 587	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
 588	struct net_device *dev = priv->ndev;
 589	struct ucc_fast_private *uccf;
 590	struct ucc_tdm_info *ut_info;
 591	u32 ucce;
 592	u32 uccm;
 593
 594	ut_info = priv->ut_info;
 595	uccf = priv->uccf;
 596
 597	ucce = ioread32be(uccf->p_ucce);
 598	uccm = ioread32be(uccf->p_uccm);
 599	ucce &= uccm;
 600	iowrite32be(ucce, uccf->p_ucce);
 601	if (!ucce)
 602		return IRQ_NONE;
 603
 604	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
 605		if (napi_schedule_prep(&priv->napi)) {
 606			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
 607				  << 16);
 608			iowrite32be(uccm, uccf->p_uccm);
 609			__napi_schedule(&priv->napi);
 610		}
 611	}
 612
 613	/* Errors and other events */
 614	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
 615		dev->stats.rx_errors++;
 616	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
 617		dev->stats.tx_errors++;
 618
 619	return IRQ_HANDLED;
 620}
 621
 622static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 623{
 624	const size_t size = sizeof(te1_settings);
 625	te1_settings line;
 626	struct ucc_hdlc_private *priv = netdev_priv(dev);
 627
 628	if (cmd != SIOCWANDEV)
 629		return hdlc_ioctl(dev, ifr, cmd);
 630
 631	switch (ifr->ifr_settings.type) {
 632	case IF_GET_IFACE:
 633		ifr->ifr_settings.type = IF_IFACE_E1;
 634		if (ifr->ifr_settings.size < size) {
 635			ifr->ifr_settings.size = size; /* data size wanted */
 636			return -ENOBUFS;
 637		}
 638		memset(&line, 0, sizeof(line));
 639		line.clock_type = priv->clocking;
 640
 641		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
 642			return -EFAULT;
 643		return 0;
 644
 645	default:
 646		return hdlc_ioctl(dev, ifr, cmd);
 647	}
 648}
 649
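Note: uhdlc_ioctl() answers SIOCWANDEV with IF_GET_IFACE by reporting IF_IFACE_E1 and copying out a te1_settings block whose clock_type reflects priv->clocking. A sketch of the user-space caller (error handling trimmed; "hdlc0" is an assumed interface name):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/sockios.h>

int main(void)
{
	te1_settings line;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_settings.type = IF_GET_IFACE;
	ifr.ifr_settings.size = sizeof(line);		/* too small => -ENOBUFS */
	ifr.ifr_settings.ifs_ifsu.te1 = &line;

	if (ioctl(fd, SIOCWANDEV, &ifr) == 0)
		printf("iface type %u, clock type %u\n",
		       ifr.ifr_settings.type, line.clock_type);
	return 0;
}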
 650static int uhdlc_open(struct net_device *dev)
 651{
 652	u32 cecr_subblock;
 653	hdlc_device *hdlc = dev_to_hdlc(dev);
 654	struct ucc_hdlc_private *priv = hdlc->priv;
 655	struct ucc_tdm *utdm = priv->utdm;
 656
 657	if (priv->hdlc_busy != 1) {
 658		if (request_irq(priv->ut_info->uf_info.irq,
 659				ucc_hdlc_irq_handler, 0, "hdlc", priv))
 660			return -ENODEV;
 661
 662		cecr_subblock = ucc_fast_get_qe_cr_subblock(
 663					priv->ut_info->uf_info.ucc_num);
 664
 665		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
 666			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
 667
 668		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 669
 670		/* Enable the TDM port */
 671		if (priv->tsa)
 672			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
 673
 674		priv->hdlc_busy = 1;
 675		netif_device_attach(priv->ndev);
 676		napi_enable(&priv->napi);
 677		netif_start_queue(dev);
 678		hdlc_open(dev);
 679	}
 680
 681	return 0;
 682}
 683
 684static void uhdlc_memclean(struct ucc_hdlc_private *priv)
 685{
 686	qe_muram_free(priv->ucc_pram->riptr);
 687	qe_muram_free(priv->ucc_pram->tiptr);
 688
 689	if (priv->rx_bd_base) {
 690		dma_free_coherent(priv->dev,
 691				  RX_BD_RING_LEN * sizeof(struct qe_bd),
 692				  priv->rx_bd_base, priv->dma_rx_bd);
 693
 694		priv->rx_bd_base = NULL;
 695		priv->dma_rx_bd = 0;
 696	}
 697
 698	if (priv->tx_bd_base) {
 699		dma_free_coherent(priv->dev,
 700				  TX_BD_RING_LEN * sizeof(struct qe_bd),
 701				  priv->tx_bd_base, priv->dma_tx_bd);
 702
 703		priv->tx_bd_base = NULL;
 704		priv->dma_tx_bd = 0;
 705	}
 706
 707	if (priv->ucc_pram) {
 708		qe_muram_free(priv->ucc_pram_offset);
 709		priv->ucc_pram = NULL;
 710		priv->ucc_pram_offset = 0;
 711	 }
 712
 713	kfree(priv->rx_skbuff);
 714	priv->rx_skbuff = NULL;
 715
 716	kfree(priv->tx_skbuff);
 717	priv->tx_skbuff = NULL;
 718
 719	if (priv->uf_regs) {
 720		iounmap(priv->uf_regs);
 721		priv->uf_regs = NULL;
 722	}
 723
 724	if (priv->uccf) {
 725		ucc_fast_free(priv->uccf);
 726		priv->uccf = NULL;
 727	}
 728
 729	if (priv->rx_buffer) {
 730		dma_free_coherent(priv->dev,
 731				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
 732				  priv->rx_buffer, priv->dma_rx_addr);
 733		priv->rx_buffer = NULL;
 734		priv->dma_rx_addr = 0;
 735	}
 736
 737	if (priv->tx_buffer) {
 738		dma_free_coherent(priv->dev,
 739				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
 740				  priv->tx_buffer, priv->dma_tx_addr);
 741		priv->tx_buffer = NULL;
 742		priv->dma_tx_addr = 0;
 743	}
 744}
 745
 746static int uhdlc_close(struct net_device *dev)
 747{
 748	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
 749	struct ucc_tdm *utdm = priv->utdm;
 750	u32 cecr_subblock;
 751
 752	napi_disable(&priv->napi);
 753	cecr_subblock = ucc_fast_get_qe_cr_subblock(
 754				priv->ut_info->uf_info.ucc_num);
 755
 756	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
 757		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 758	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
 759		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 760
 761	if (priv->tsa)
 762		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
 763
 764	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 765
 766	free_irq(priv->ut_info->uf_info.irq, priv);
 767	netif_stop_queue(dev);
 768	priv->hdlc_busy = 0;
 769
 770	return 0;
 771}
 772
 773static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
 774			   unsigned short parity)
 775{
 776	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
 777
 778	if (encoding != ENCODING_NRZ &&
 779	    encoding != ENCODING_NRZI)
 780		return -EINVAL;
 781
 782	if (parity != PARITY_NONE &&
 783	    parity != PARITY_CRC32_PR1_CCITT &&
 784	    parity != PARITY_CRC16_PR1_CCITT)
 785		return -EINVAL;
 786
 787	priv->encoding = encoding;
 788	priv->parity = parity;
 789
 790	return 0;
 791}
 792
 793#ifdef CONFIG_PM
 794static void store_clk_config(struct ucc_hdlc_private *priv)
 795{
 796	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
 797
 798	/* store si clk */
 799	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
 800	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
 801
 802	/* store si sync */
 803	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
 804
 805	/* store ucc clk */
 806	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
 807}
 808
 809static void resume_clk_config(struct ucc_hdlc_private *priv)
 810{
 811	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
 812
 813	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
 814
 815	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
 816	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
 817
 818	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
 819}
 820
 821static int uhdlc_suspend(struct device *dev)
 822{
 823	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
 824	struct ucc_tdm_info *ut_info;
 825	struct ucc_fast __iomem *uf_regs;
 826
 827	if (!priv)
 828		return -EINVAL;
 829
 830	if (!netif_running(priv->ndev))
 831		return 0;
 832
 833	netif_device_detach(priv->ndev);
 834	napi_disable(&priv->napi);
 835
 836	ut_info = priv->ut_info;
 837	uf_regs = priv->uf_regs;
 838
 839	/* backup gumr guemr*/
 840	priv->gumr = ioread32be(&uf_regs->gumr);
 841	priv->guemr = ioread8(&uf_regs->guemr);
 842
 843	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
 844					GFP_KERNEL);
 845	if (!priv->ucc_pram_bak)
 846		return -ENOMEM;
 847
 848	/* backup HDLC parameter */
 849	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
 850		      sizeof(struct ucc_hdlc_param));
 851
 852	/* store the clk configuration */
 853	store_clk_config(priv);
 854
 855	/* save power */
 856	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 857
 858	return 0;
 859}
 860
 861static int uhdlc_resume(struct device *dev)
 862{
 863	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
 864	struct ucc_tdm *utdm;
 865	struct ucc_tdm_info *ut_info;
 866	struct ucc_fast __iomem *uf_regs;
 867	struct ucc_fast_private *uccf;
 868	struct ucc_fast_info *uf_info;
 869	int ret, i;
 870	u32 cecr_subblock;
 871	u16 bd_status;
 872
 873	if (!priv)
 874		return -EINVAL;
 875
 876	if (!netif_running(priv->ndev))
 877		return 0;
 878
 879	utdm = priv->utdm;
 880	ut_info = priv->ut_info;
 881	uf_info = &ut_info->uf_info;
 882	uf_regs = priv->uf_regs;
 883	uccf = priv->uccf;
 884
 885	/* restore gumr guemr */
 886	iowrite8(priv->guemr, &uf_regs->guemr);
 887	iowrite32be(priv->gumr, &uf_regs->gumr);
 888
 889	/* Set Virtual Fifo registers */
 890	iowrite16be(uf_info->urfs, &uf_regs->urfs);
 891	iowrite16be(uf_info->urfet, &uf_regs->urfet);
 892	iowrite16be(uf_info->urfset, &uf_regs->urfset);
 893	iowrite16be(uf_info->utfs, &uf_regs->utfs);
 894	iowrite16be(uf_info->utfet, &uf_regs->utfet);
 895	iowrite16be(uf_info->utftt, &uf_regs->utftt);
 896	/* utfb, urfb are offsets from MURAM base */
 897	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
 898	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
 899
 900	/* Rx Tx and sync clock routing */
 901	resume_clk_config(priv);
 902
 903	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 904	iowrite32be(0xffffffff, &uf_regs->ucce);
 905
 906	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 907
 908	/* rebuild SIRAM */
 909	if (priv->tsa)
 910		ucc_tdm_init(priv->utdm, priv->ut_info);
 911
 912	/* Write to QE CECR, UCCx channel to Stop Transmission */
 913	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 914	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
 915			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 916
 917	/* Set UPSMR normal mode */
 918	iowrite32be(0, &uf_regs->upsmr);
 919
 920	/* init parameter base */
 921	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 922	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
 923			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 924
 925	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 926				qe_muram_addr(priv->ucc_pram_offset);
 927
 928	/* restore ucc parameter */
 929	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
 930		    sizeof(struct ucc_hdlc_param));
 931	kfree(priv->ucc_pram_bak);
 932
 933	/* rebuild BD entry */
 934	for (i = 0; i < RX_BD_RING_LEN; i++) {
 935		if (i < (RX_BD_RING_LEN - 1))
 936			bd_status = R_E_S | R_I_S;
 937		else
 938			bd_status = R_E_S | R_I_S | R_W_S;
 939
 940		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
 941		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
 942			    &priv->rx_bd_base[i].buf);
 943	}
 944
 945	for (i = 0; i < TX_BD_RING_LEN; i++) {
 946		if (i < (TX_BD_RING_LEN - 1))
 947			bd_status =  T_I_S | T_TC_S;
 948		else
 949			bd_status =  T_I_S | T_TC_S | T_W_S;
 950
 951		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
 952		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
 953			    &priv->tx_bd_base[i].buf);
 954	}
 955
 956	/* if hdlc is busy enable TX and RX */
 957	if (priv->hdlc_busy == 1) {
 958		cecr_subblock = ucc_fast_get_qe_cr_subblock(
 959					priv->ut_info->uf_info.ucc_num);
 960
 961		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
 962			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 963
 964		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 965
 966		/* Enable the TDM port */
 967		if (priv->tsa)
 968			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
 969	}
 970
 971	napi_enable(&priv->napi);
 972	netif_device_attach(priv->ndev);
 973
 974	return 0;
 975}
 976
 977static const struct dev_pm_ops uhdlc_pm_ops = {
 978	.suspend = uhdlc_suspend,
 979	.resume = uhdlc_resume,
 980	.freeze = uhdlc_suspend,
 981	.thaw = uhdlc_resume,
 982};
 983
 984#define HDLC_PM_OPS (&uhdlc_pm_ops)
 985
 986#else
 987
 988#define HDLC_PM_OPS NULL
 989
 990#endif
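Note: the CONFIG_PM block above resolves HDLC_PM_OPS to either &uhdlc_pm_ops or NULL, which lets the platform_driver at the bottom of the file assign .pm unconditionally. The same compile-time switch in miniature (generic names, outside the kernel):

#include <stdio.h>

struct pm_ops { int (*suspend)(void); int (*resume)(void); };

#ifdef HAVE_PM
static int my_suspend(void) { return 0; }
static int my_resume(void)  { return 0; }
static const struct pm_ops my_pm_ops = { my_suspend, my_resume };
#define MY_PM_OPS (&my_pm_ops)
#else
#define MY_PM_OPS ((const struct pm_ops *)NULL)	/* PM compiled out */
#endif

struct driver { const struct pm_ops *pm; };

int main(void)
{
	struct driver drv = { .pm = MY_PM_OPS };	/* valid either way */

	printf("pm ops %s\n", drv.pm ? "present" : "absent");
	return 0;
}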
 991static const struct net_device_ops uhdlc_ops = {
 992	.ndo_open       = uhdlc_open,
 993	.ndo_stop       = uhdlc_close,
 994	.ndo_start_xmit = hdlc_start_xmit,
 995	.ndo_do_ioctl   = uhdlc_ioctl,
 996};
 997
 998static int ucc_hdlc_probe(struct platform_device *pdev)
 999{
1000	struct device_node *np = pdev->dev.of_node;
1001	struct ucc_hdlc_private *uhdlc_priv = NULL;
1002	struct ucc_tdm_info *ut_info;
1003	struct ucc_tdm *utdm = NULL;
1004	struct resource res;
1005	struct net_device *dev;
1006	hdlc_device *hdlc;
1007	int ucc_num;
1008	const char *sprop;
1009	int ret;
1010	u32 val;
1011
1012	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1013	if (ret) {
1014		dev_err(&pdev->dev, "Invalid ucc property\n");
1015		return -ENODEV;
1016	}
1017
1018	ucc_num = val - 1;
1019	if ((ucc_num > 3) || (ucc_num < 0)) {
1020		dev_err(&pdev->dev, ": Invalid UCC num\n");
1021		return -EINVAL;
1022	}
1023
1024	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1025	       sizeof(utdm_primary_info));
1026
1027	ut_info = &utdm_info[ucc_num];
1028	ut_info->uf_info.ucc_num = ucc_num;
1029
1030	sprop = of_get_property(np, "rx-clock-name", NULL);
1031	if (sprop) {
1032		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1033		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1034		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
1035			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1036			return -EINVAL;
1037		}
1038	} else {
1039		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1040		return -EINVAL;
1041	}
1042
1043	sprop = of_get_property(np, "tx-clock-name", NULL);
1044	if (sprop) {
1045		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1046		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1047		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
1048			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1049			return -EINVAL;
1050		}
1051	} else {
1052		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1053		return -EINVAL;
1054	}
1055
1056	ret = of_address_to_resource(np, 0, &res);
1057	if (ret)
1058		return -EINVAL;
1059
1060	ut_info->uf_info.regs = res.start;
1061	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1062
1063	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
1064	if (!uhdlc_priv) {
1065		return -ENOMEM;
1066	}
1067
1068	dev_set_drvdata(&pdev->dev, uhdlc_priv);
1069	uhdlc_priv->dev = &pdev->dev;
1070	uhdlc_priv->ut_info = ut_info;
1071
1072	if (of_get_property(np, "fsl,tdm-interface", NULL))
1073		uhdlc_priv->tsa = 1;
1074
1075	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1076		uhdlc_priv->loopback = 1;
1077
1078	if (of_get_property(np, "fsl,hdlc-bus", NULL))
1079		uhdlc_priv->hdlc_bus = 1;
1080
1081	if (uhdlc_priv->tsa == 1) {
1082		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1083		if (!utdm) {
1084			ret = -ENOMEM;
1085			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1086			goto free_uhdlc_priv;
1087		}
1088		uhdlc_priv->utdm = utdm;
1089		ret = ucc_of_parse_tdm(np, utdm, ut_info);
1090		if (ret)
1091			goto free_utdm;
1092	}
1093
1094	ret = uhdlc_init(uhdlc_priv);
1095	if (ret) {
1096		dev_err(&pdev->dev, "Failed to init uhdlc\n");
1097		goto free_utdm;
1098	}
1099
1100	dev = alloc_hdlcdev(uhdlc_priv);
1101	if (!dev) {
1102		ret = -ENOMEM;
1103		pr_err("ucc_hdlc: unable to allocate memory\n");
1104		goto undo_uhdlc_init;
1105	}
1106
1107	uhdlc_priv->ndev = dev;
1108	hdlc = dev_to_hdlc(dev);
1109	dev->tx_queue_len = 16;
1110	dev->netdev_ops = &uhdlc_ops;
1111	hdlc->attach = ucc_hdlc_attach;
1112	hdlc->xmit = ucc_hdlc_tx;
1113	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1114	if (register_hdlc_device(dev)) {
1115		ret = -ENOBUFS;
1116		pr_err("ucc_hdlc: unable to register hdlc device\n");
1117		/* free the netdev only once, via the free_dev label */
1118		goto free_dev;
1119	}
1120
1121	return 0;
1122
1123free_dev:
1124	free_netdev(dev);
1125undo_uhdlc_init:
1126free_utdm:
1127	if (uhdlc_priv->tsa)
1128		kfree(utdm);
1129free_uhdlc_priv:
1130	kfree(uhdlc_priv);
1131	return ret;
1132}
1133
1134static int ucc_hdlc_remove(struct platform_device *pdev)
1135{
1136	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1137
1138	uhdlc_memclean(priv);
1139
1140	if (priv->utdm->si_regs) {
1141		iounmap(priv->utdm->si_regs);
1142		priv->utdm->si_regs = NULL;
1143	}
1144
1145	if (priv->utdm->siram) {
1146		iounmap(priv->utdm->siram);
1147		priv->utdm->siram = NULL;
1148	}
1149	kfree(priv);
1150
1151	dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1152
1153	return 0;
1154}
1155
1156static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1157	{
1158	.compatible = "fsl,ucc-hdlc",
1159	},
1160	{},
1161};
1162
1163MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1164
1165static struct platform_driver ucc_hdlc_driver = {
1166	.probe	= ucc_hdlc_probe,
1167	.remove	= ucc_hdlc_remove,
1168	.driver	= {
1169		.name		= DRV_NAME,
1170		.pm		= HDLC_PM_OPS,
1171		.of_match_table	= fsl_ucc_hdlc_of_match,
1172	},
1173};
1174
1175module_platform_driver(ucc_hdlc_driver);
1176MODULE_LICENSE("GPL");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* Freescale QUICC Engine HDLC Device Driver
   3 *
   4 * Copyright 2016 Freescale Semiconductor Inc.
   5 */
   6
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/hdlc.h>
  10#include <linux/init.h>
  11#include <linux/interrupt.h>
  12#include <linux/io.h>
  13#include <linux/irq.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/netdevice.h>
  17#include <linux/of_address.h>
  18#include <linux/of_irq.h>
  19#include <linux/of_platform.h>
  20#include <linux/platform_device.h>
  21#include <linux/sched.h>
  22#include <linux/skbuff.h>
  23#include <linux/slab.h>
  24#include <linux/spinlock.h>
  25#include <linux/stddef.h>
  26#include <soc/fsl/qe/qe_tdm.h>
  27#include <uapi/linux/if_arp.h>
  28
  29#include "fsl_ucc_hdlc.h"
  30
  31#define DRV_DESC "Freescale QE UCC HDLC Driver"
  32#define DRV_NAME "ucc_hdlc"
  33
  34#define TDM_PPPOHT_SLIC_MAXIN
  35#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
  36
  37static struct ucc_tdm_info utdm_primary_info = {
  38	.uf_info = {
  39		.tsa = 0,
  40		.cdp = 0,
  41		.cds = 1,
  42		.ctsp = 1,
  43		.ctss = 1,
  44		.revd = 0,
  45		.urfs = 256,
  46		.utfs = 256,
  47		.urfet = 128,
  48		.urfset = 192,
  49		.utfet = 128,
  50		.utftt = 0x40,
  51		.ufpt = 256,
  52		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
  53		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
  54		.tenc = UCC_FAST_TX_ENCODING_NRZ,
  55		.renc = UCC_FAST_RX_ENCODING_NRZ,
  56		.tcrc = UCC_FAST_16_BIT_CRC,
  57		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
  58	},
  59
  60	.si_info = {
  61#ifdef TDM_PPPOHT_SLIC_MAXIN
  62		.simr_rfsd = 1,
  63		.simr_tfsd = 2,
  64#else
  65		.simr_rfsd = 0,
  66		.simr_tfsd = 0,
  67#endif
  68		.simr_crt = 0,
  69		.simr_sl = 0,
  70		.simr_ce = 1,
  71		.simr_fe = 1,
  72		.simr_gm = 0,
  73	},
  74};
  75
  76static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
  77
  78static int uhdlc_init(struct ucc_hdlc_private *priv)
  79{
  80	struct ucc_tdm_info *ut_info;
  81	struct ucc_fast_info *uf_info;
  82	u32 cecr_subblock;
  83	u16 bd_status;
  84	int ret, i;
  85	void *bd_buffer;
  86	dma_addr_t bd_dma_addr;
  87	u32 riptr;
  88	u32 tiptr;
  89	u32 gumr;
  90
  91	ut_info = priv->ut_info;
  92	uf_info = &ut_info->uf_info;
  93
  94	if (priv->tsa) {
  95		uf_info->tsa = 1;
  96		uf_info->ctsp = 1;
  97		uf_info->cds = 1;
  98		uf_info->ctss = 1;
  99	} else {
 100		uf_info->cds = 0;
 101		uf_info->ctsp = 0;
 102		uf_info->ctss = 0;
 103	}
 104
 105	/* This sets the HPM field in the CMXUCR register, which configures an
 106	 * open-drain connected HDLC bus
 107	 */
 108	if (priv->hdlc_bus)
 109		uf_info->brkpt_support = 1;
 110
 111	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
 112				UCC_HDLC_UCCE_TXB) << 16);
 113
 114	ret = ucc_fast_init(uf_info, &priv->uccf);
 115	if (ret) {
 116		dev_err(priv->dev, "Failed to init uccf.");
 117		return ret;
 118	}
 119
 120	priv->uf_regs = priv->uccf->uf_regs;
 121	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 122
 123	/* Loopback mode */
 124	if (priv->loopback) {
 125		dev_info(priv->dev, "Loopback Mode\n");
 126		/* use the same clock when working in loopback */
 127		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
 128
 129		gumr = ioread32be(&priv->uf_regs->gumr);
 130		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
 131			 UCC_FAST_GUMR_TCI);
 132		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
 133		iowrite32be(gumr, &priv->uf_regs->gumr);
 134	}
 135
 136	/* Initialize SI */
 137	if (priv->tsa)
 138		ucc_tdm_init(priv->utdm, priv->ut_info);
 139
 140	/* Write to QE CECR, UCCx channel to Stop Transmission */
 141	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 142	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
 143			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
 144
 145	/* Set UPSMR normal mode (needs fixing) */
 146	iowrite32be(0, &priv->uf_regs->upsmr);
 147
 148	/* hdlc_bus mode */
 149	if (priv->hdlc_bus) {
 150		u32 upsmr;
 151
 152		dev_info(priv->dev, "HDLC bus Mode\n");
 153		upsmr = ioread32be(&priv->uf_regs->upsmr);
 154
 155		/* bus mode and retransmit enable, with collision window
 156		 * set to 8 bytes
 157		 */
 158		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
 159				UCC_HDLC_UPSMR_CW8;
 160		iowrite32be(upsmr, &priv->uf_regs->upsmr);
 161
 162		/* explicitly disable CDS & CTSP */
 163		gumr = ioread32be(&priv->uf_regs->gumr);
 164		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
 165		/* set automatic sync to explicitly ignore CD signal */
 166		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
 167		iowrite32be(gumr, &priv->uf_regs->gumr);
 168	}
 169
 170	priv->rx_ring_size = RX_BD_RING_LEN;
 171	priv->tx_ring_size = TX_BD_RING_LEN;
 172	/* Alloc Rx BD */
 173	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
 174			RX_BD_RING_LEN * sizeof(struct qe_bd),
 175			&priv->dma_rx_bd, GFP_KERNEL);
 176
 177	if (!priv->rx_bd_base) {
 178		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
 179		ret = -ENOMEM;
 180		goto free_uccf;
 181	}
 182
 183	/* Alloc Tx BD */
 184	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
 185			TX_BD_RING_LEN * sizeof(struct qe_bd),
 186			&priv->dma_tx_bd, GFP_KERNEL);
 187
 188	if (!priv->tx_bd_base) {
 189		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
 190		ret = -ENOMEM;
 191		goto free_rx_bd;
 192	}
 193
 194	/* Alloc parameter ram for ucc hdlc */
 195	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 196				ALIGNMENT_OF_UCC_HDLC_PRAM);
 197
 198	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
 199		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
 200		ret = -ENOMEM;
 201		goto free_tx_bd;
 202	}
 203
 204	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
 205				  sizeof(*priv->rx_skbuff),
 206				  GFP_KERNEL);
 207	if (!priv->rx_skbuff)
 208		goto free_ucc_pram;
 209
 210	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
 211				  sizeof(*priv->tx_skbuff),
 212				  GFP_KERNEL);
 213	if (!priv->tx_skbuff)
 214		goto free_rx_skbuff;
 215
 216	priv->skb_curtx = 0;
 217	priv->skb_dirtytx = 0;
 218	priv->curtx_bd = priv->tx_bd_base;
 219	priv->dirty_tx = priv->tx_bd_base;
 220	priv->currx_bd = priv->rx_bd_base;
 221	priv->currx_bdnum = 0;
 222
 223	/* init parameter base */
 224	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 225	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
 226			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 227
 228	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 229					qe_muram_addr(priv->ucc_pram_offset);
 230
 231	/* Zero out parameter ram */
 232	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
 233
 234	/* Alloc riptr, tiptr */
 235	riptr = qe_muram_alloc(32, 32);
 236	if (IS_ERR_VALUE(riptr)) {
 237		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
 238		ret = -ENOMEM;
 239		goto free_tx_skbuff;
 240	}
 241
 242	tiptr = qe_muram_alloc(32, 32);
 243	if (IS_ERR_VALUE(tiptr)) {
 244		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
 245		ret = -ENOMEM;
 246		goto free_riptr;
 247	}
 248
 249	/* Set RIPTR, TIPTR */
 250	iowrite16be(riptr, &priv->ucc_pram->riptr);
 251	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
 252
 253	/* Set MRBLR */
 254	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
 255
 256	/* Set RBASE, TBASE */
 257	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
 258	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
 259
 260	/* Set RSTATE, TSTATE */
 261	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
 262	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
 263
 264	/* Set C_MASK, C_PRES for 16bit CRC */
 265	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
 266	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
 267
 268	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
 269	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
 270	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
 271	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
 272	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
 273	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
 274	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
 275	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
 276
 277	/* Get BD buffer */
 278	bd_buffer = dma_alloc_coherent(priv->dev,
 279				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
 280				       &bd_dma_addr, GFP_KERNEL);
 281
 282	if (!bd_buffer) {
 283		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
 284		ret = -ENOMEM;
 285		goto free_tiptr;
 286	}
 287
 288	priv->rx_buffer = bd_buffer;
 289	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
 290
 291	priv->dma_rx_addr = bd_dma_addr;
 292	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
 293
 294	for (i = 0; i < RX_BD_RING_LEN; i++) {
 295		if (i < (RX_BD_RING_LEN - 1))
 296			bd_status = R_E_S | R_I_S;
 297		else
 298			bd_status = R_E_S | R_I_S | R_W_S;
 299
 300		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
 301		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
 302			    &priv->rx_bd_base[i].buf);
 303	}
 304
 305	for (i = 0; i < TX_BD_RING_LEN; i++) {
 306		if (i < (TX_BD_RING_LEN - 1))
 307			bd_status =  T_I_S | T_TC_S;
 308		else
 309			bd_status =  T_I_S | T_TC_S | T_W_S;
 310
 311		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
 312		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
 313			    &priv->tx_bd_base[i].buf);
 314	}
 315
 316	return 0;
 317
 318free_tiptr:
 319	qe_muram_free(tiptr);
 320free_riptr:
 321	qe_muram_free(riptr);
 322free_tx_skbuff:
 323	kfree(priv->tx_skbuff);
 324free_rx_skbuff:
 325	kfree(priv->rx_skbuff);
 326free_ucc_pram:
 327	qe_muram_free(priv->ucc_pram_offset);
 328free_tx_bd:
 329	dma_free_coherent(priv->dev,
 330			  TX_BD_RING_LEN * sizeof(struct qe_bd),
 331			  priv->tx_bd_base, priv->dma_tx_bd);
 332free_rx_bd:
 333	dma_free_coherent(priv->dev,
 334			  RX_BD_RING_LEN * sizeof(struct qe_bd),
 335			  priv->rx_bd_base, priv->dma_rx_bd);
 336free_uccf:
 337	ucc_fast_free(priv->uccf);
 338
 339	return ret;
 340}
 341
 342static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 343{
 344	hdlc_device *hdlc = dev_to_hdlc(dev);
 345	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
 346	struct qe_bd __iomem *bd;
 347	u16 bd_status;
 348	unsigned long flags;
 349	u16 *proto_head;
 350
 351	switch (dev->type) {
 352	case ARPHRD_RAWHDLC:
 353		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
 354			dev->stats.tx_dropped++;
 355			dev_kfree_skb(skb);
 356			netdev_err(dev, "Not enough space for hdlc head\n");
 357			return -ENOMEM;
 358		}
 359
 360		skb_push(skb, HDLC_HEAD_LEN);
 361
 362		proto_head = (u16 *)skb->data;
 363		*proto_head = htons(DEFAULT_HDLC_HEAD);
 364
 365		dev->stats.tx_bytes += skb->len;
 366		break;
 367
 368	case ARPHRD_PPP:
 369		proto_head = (u16 *)skb->data;
 370		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
 371			dev->stats.tx_dropped++;
 372			dev_kfree_skb(skb);
 373			netdev_err(dev, "Wrong ppp header\n");
 374			return -ENOMEM;
 375		}
 376
 377		dev->stats.tx_bytes += skb->len;
 378		break;
 379
 380	case ARPHRD_ETHER:
 381		dev->stats.tx_bytes += skb->len;
 382		break;
 383
 384	default:
 385		dev->stats.tx_dropped++;
 386		dev_kfree_skb(skb);
 387		return -ENOMEM;
 388	}
 389	netdev_sent_queue(dev, skb->len);
 390	spin_lock_irqsave(&priv->lock, flags);
 391
 392	/* Start from the next BD that should be filled */
 393	bd = priv->curtx_bd;
 394	bd_status = ioread16be(&bd->status);
 395	/* Save the skb pointer so we can free it later */
 396	priv->tx_skbuff[priv->skb_curtx] = skb;
 397
 398	/* Update the current skb pointer (wrapping if this was the last) */
 399	priv->skb_curtx =
 400	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
 401
 402	/* copy skb data to tx buffer for sdma processing */
 403	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 404	       skb->data, skb->len);
 405
 406	/* set bd status and length */
 407	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 408
 409	iowrite16be(skb->len, &bd->length);
 410	iowrite16be(bd_status, &bd->status);
 411
 412	/* Move to next BD in the ring */
 413	if (!(bd_status & T_W_S))
 414		bd += 1;
 415	else
 416		bd = priv->tx_bd_base;
 417
 418	if (bd == priv->dirty_tx) {
 419		if (!netif_queue_stopped(dev))
 420			netif_stop_queue(dev);
 421	}
 422
 423	priv->curtx_bd = bd;
 424
 425	spin_unlock_irqrestore(&priv->lock, flags);
 426
 427	return NETDEV_TX_OK;
 428}
 429
 430static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
 431{
 432	u32 cecr_subblock;
 433
 434	cecr_subblock =
 435		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
 436
 437	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
 438		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
 439	return 0;
 440}
 441
 442static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 443{
 444	/* Start from the next BD that should be filled */
 445	struct net_device *dev = priv->ndev;
 446	unsigned int bytes_sent = 0;
 447	int howmany = 0;
 448	struct qe_bd *bd;		/* BD pointer */
 449	u16 bd_status;
 450	int tx_restart = 0;
 451
 452	bd = priv->dirty_tx;
 453	bd_status = ioread16be(&bd->status);
 454
 455	/* Normal processing. */
 456	while ((bd_status & T_R_S) == 0) {
 457		struct sk_buff *skb;
 458
 459		if (bd_status & T_UN_S) { /* Underrun */
 460			dev->stats.tx_fifo_errors++;
 461			tx_restart = 1;
 462		}
 463		if (bd_status & T_CT_S) { /* Carrier lost */
 464			dev->stats.tx_carrier_errors++;
 465			tx_restart = 1;
 466		}
 467
 468		/* BD contains already transmitted buffer.   */
 469		/* Handle the transmitted buffer and release */
 470		/* the BD to be used with the current frame  */
 471
 472		skb = priv->tx_skbuff[priv->skb_dirtytx];
 473		if (!skb)
 474			break;
 475		howmany++;
 476		bytes_sent += skb->len;
 477		dev->stats.tx_packets++;
 478		memset(priv->tx_buffer +
 479		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 480		       0, skb->len);
 481		dev_consume_skb_irq(skb);
 482
 483		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 484		priv->skb_dirtytx =
 485		    (priv->skb_dirtytx +
 486		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
 487
 488		/* We freed a buffer, so now we can restart transmission */
 489		if (netif_queue_stopped(dev))
 490			netif_wake_queue(dev);
 491
 492		/* Advance the confirmation BD pointer */
 493		if (!(bd_status & T_W_S))
 494			bd += 1;
 495		else
 496			bd = priv->tx_bd_base;
 497		bd_status = ioread16be(&bd->status);
 498	}
 499	priv->dirty_tx = bd;
 500
 501	if (tx_restart)
 502		hdlc_tx_restart(priv);
 503
 504	netdev_completed_queue(dev, howmany, bytes_sent);
 505	return 0;
 506}
 507
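Note: relative to v4.17, this version wires the TX path into byte queue limits: ucc_hdlc_tx() calls netdev_sent_queue(), hdlc_tx_done() reports completions via netdev_completed_queue(), and uhdlc_open()/uhdlc_close() call netdev_reset_queue(). A toy model of the bookkeeping those three calls perform (the real state lives in struct netdev_queue; this struct is illustrative):

#include <stdio.h>

struct queue { unsigned long queued, completed; };

static void sent_queue(struct queue *q, unsigned int n)      { q->queued += n; }
static void completed_queue(struct queue *q, unsigned int n) { q->completed += n; }
static void reset_queue(struct queue *q) { q->queued = q->completed = 0; }

int main(void)
{
	struct queue q;

	reset_queue(&q);		/* cf. netdev_reset_queue() in open */
	sent_queue(&q, 1500);		/* cf. netdev_sent_queue() in xmit */
	completed_queue(&q, 1500);	/* cf. netdev_completed_queue() here */
	printf("in flight: %lu bytes\n", q.queued - q.completed);
	return 0;
}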
 508static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 509{
 510	struct net_device *dev = priv->ndev;
 511	struct sk_buff *skb = NULL;
 512	hdlc_device *hdlc = dev_to_hdlc(dev);
 513	struct qe_bd *bd;
 514	u16 bd_status;
 515	u16 length, howmany = 0;
 516	u8 *bdbuffer;
 517
 518	bd = priv->currx_bd;
 519	bd_status = ioread16be(&bd->status);
 520
 521	/* while there are received buffers and BD is full (~R_E) */
 522	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
 523		if (bd_status & (RX_BD_ERRORS)) {
 524			dev->stats.rx_errors++;
 525
 526			if (bd_status & R_CD_S)
 527				dev->stats.collisions++;
 528			if (bd_status & R_OV_S)
 529				dev->stats.rx_fifo_errors++;
 530			if (bd_status & R_CR_S)
 531				dev->stats.rx_crc_errors++;
 532			if (bd_status & R_AB_S)
 533				dev->stats.rx_over_errors++;
 534			if (bd_status & R_NO_S)
 535				dev->stats.rx_frame_errors++;
 536			if (bd_status & R_LG_S)
 537				dev->stats.rx_length_errors++;
 538
 539			goto recycle;
 540		}
 541		bdbuffer = priv->rx_buffer +
 542			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
 543		length = ioread16be(&bd->length);
 544
 545		switch (dev->type) {
 546		case ARPHRD_RAWHDLC:
 547			bdbuffer += HDLC_HEAD_LEN;
 548			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
 549
 550			skb = dev_alloc_skb(length);
 551			if (!skb) {
 552				dev->stats.rx_dropped++;
 553				return -ENOMEM;
 554			}
 555
 556			skb_put(skb, length);
 557			skb->len = length;
 558			skb->dev = dev;
 559			memcpy(skb->data, bdbuffer, length);
 560			break;
 561
 562		case ARPHRD_PPP:
 563		case ARPHRD_ETHER:
 564			length -= HDLC_CRC_SIZE;
 565
 566			skb = dev_alloc_skb(length);
 567			if (!skb) {
 568				dev->stats.rx_dropped++;
 569				return -ENOMEM;
 570			}
 571
 572			skb_put(skb, length);
 573			skb->len = length;
 574			skb->dev = dev;
 575			memcpy(skb->data, bdbuffer, length);
 576			break;
 577		}
 578
 579		dev->stats.rx_packets++;
 580		dev->stats.rx_bytes += skb->len;
 581		howmany++;
 582		if (hdlc->proto)
 583			skb->protocol = hdlc_type_trans(skb, dev);
 584		netif_receive_skb(skb);
 585
 586recycle:
 587		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
 588
 589		/* update to point at the next bd */
 590		if (bd_status & R_W_S) {
 591			priv->currx_bdnum = 0;
 592			bd = priv->rx_bd_base;
 593		} else {
 594			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
 595				priv->currx_bdnum += 1;
 596			else
 597				priv->currx_bdnum = RX_BD_RING_LEN - 1;
 598
 599			bd += 1;
 600		}
 601
 602		bd_status = ioread16be(&bd->status);
 603	}
 604
 605	priv->currx_bd = bd;
 606	return howmany;
 607}
 608
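Note: hdlc_rx_done() now gates all error accounting behind RX_BD_ERRORS (defined near the top of this version), which ORs the six RX status bits so one cheap test rejects a bad BD before the per-bit counters are classified. The aggregate-then-classify idiom in isolation (bit positions here are illustrative; the real R_*_S masks live in fsl_ucc_hdlc.h):

#include <assert.h>
#include <stdint.h>

#define R_CD (1u << 0)	/* illustrative positions */
#define R_OV (1u << 1)
#define R_CR (1u << 2)
#define R_AB (1u << 3)
#define R_NO (1u << 4)
#define R_LG (1u << 5)
#define RX_ERRORS (R_CD | R_OV | R_CR | R_AB | R_NO | R_LG)

int main(void)
{
	uint16_t status = R_CR;		/* e.g. a CRC error */

	if (status & RX_ERRORS) {	/* one combined test first... */
		assert(status & R_CR);	/* ...then classify per bit */
	}
	return 0;
}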
 609static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
 610{
 611	struct ucc_hdlc_private *priv = container_of(napi,
 612						     struct ucc_hdlc_private,
 613						     napi);
 614	int howmany;
 615
 616	/* Tx event processing */
 617	spin_lock(&priv->lock);
 618	hdlc_tx_done(priv);
 619	spin_unlock(&priv->lock);
 620
 621	howmany = 0;
 622	howmany += hdlc_rx_done(priv, budget - howmany);
 623
 624	if (howmany < budget) {
 625		napi_complete_done(napi, howmany);
 626		qe_setbits32(priv->uccf->p_uccm,
 627			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
 628	}
 629
 630	return howmany;
 631}
 632
 633static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
 634{
 635	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
 636	struct net_device *dev = priv->ndev;
 637	struct ucc_fast_private *uccf;
 638	struct ucc_tdm_info *ut_info;
 639	u32 ucce;
 640	u32 uccm;
 641
 642	ut_info = priv->ut_info;
 643	uccf = priv->uccf;
 644
 645	ucce = ioread32be(uccf->p_ucce);
 646	uccm = ioread32be(uccf->p_uccm);
 647	ucce &= uccm;
 648	iowrite32be(ucce, uccf->p_ucce);
 649	if (!ucce)
 650		return IRQ_NONE;
 651
 652	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
 653		if (napi_schedule_prep(&priv->napi)) {
 654			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
 655				  << 16);
 656			iowrite32be(uccm, uccf->p_uccm);
 657			__napi_schedule(&priv->napi);
 658		}
 659	}
 660
 661	/* Errors and other events */
 662	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
 663		dev->stats.rx_missed_errors++;
 664	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
 665		dev->stats.tx_errors++;
 666
 667	return IRQ_HANDLED;
 668}
 669
 670static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 671{
 672	const size_t size = sizeof(te1_settings);
 673	te1_settings line;
 674	struct ucc_hdlc_private *priv = netdev_priv(dev);
 675
 676	if (cmd != SIOCWANDEV)
 677		return hdlc_ioctl(dev, ifr, cmd);
 678
 679	switch (ifr->ifr_settings.type) {
 680	case IF_GET_IFACE:
 681		ifr->ifr_settings.type = IF_IFACE_E1;
 682		if (ifr->ifr_settings.size < size) {
 683			ifr->ifr_settings.size = size; /* data size wanted */
 684			return -ENOBUFS;
 685		}
 686		memset(&line, 0, sizeof(line));
 687		line.clock_type = priv->clocking;
 688
 689		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
 690			return -EFAULT;
 691		return 0;
 692
 693	default:
 694		return hdlc_ioctl(dev, ifr, cmd);
 695	}
 696}
 697
 698static int uhdlc_open(struct net_device *dev)
 699{
 700	u32 cecr_subblock;
 701	hdlc_device *hdlc = dev_to_hdlc(dev);
 702	struct ucc_hdlc_private *priv = hdlc->priv;
 703	struct ucc_tdm *utdm = priv->utdm;
 704
 705	if (priv->hdlc_busy != 1) {
 706		if (request_irq(priv->ut_info->uf_info.irq,
 707				ucc_hdlc_irq_handler, 0, "hdlc", priv))
 708			return -ENODEV;
 709
 710		cecr_subblock = ucc_fast_get_qe_cr_subblock(
 711					priv->ut_info->uf_info.ucc_num);
 712
 713		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
 714			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
 715
 716		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 717
 718		/* Enable the TDM port */
 719		if (priv->tsa)
 720			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
 721
 722		priv->hdlc_busy = 1;
 723		netif_device_attach(priv->ndev);
 724		napi_enable(&priv->napi);
 725		netdev_reset_queue(dev);
 726		netif_start_queue(dev);
 727		hdlc_open(dev);
 728	}
 729
 730	return 0;
 731}
 732
 733static void uhdlc_memclean(struct ucc_hdlc_private *priv)
 734{
 735	qe_muram_free(priv->ucc_pram->riptr);
 736	qe_muram_free(priv->ucc_pram->tiptr);
 737
 738	if (priv->rx_bd_base) {
 739		dma_free_coherent(priv->dev,
 740				  RX_BD_RING_LEN * sizeof(struct qe_bd),
 741				  priv->rx_bd_base, priv->dma_rx_bd);
 742
 743		priv->rx_bd_base = NULL;
 744		priv->dma_rx_bd = 0;
 745	}
 746
 747	if (priv->tx_bd_base) {
 748		dma_free_coherent(priv->dev,
 749				  TX_BD_RING_LEN * sizeof(struct qe_bd),
 750				  priv->tx_bd_base, priv->dma_tx_bd);
 751
 752		priv->tx_bd_base = NULL;
 753		priv->dma_tx_bd = 0;
 754	}
 755
 756	if (priv->ucc_pram) {
 757		qe_muram_free(priv->ucc_pram_offset);
 758		priv->ucc_pram = NULL;
 759		priv->ucc_pram_offset = 0;
 760	 }
 761
 762	kfree(priv->rx_skbuff);
 763	priv->rx_skbuff = NULL;
 764
 765	kfree(priv->tx_skbuff);
 766	priv->tx_skbuff = NULL;
 767
 768	if (priv->uf_regs) {
 769		iounmap(priv->uf_regs);
 770		priv->uf_regs = NULL;
 771	}
 772
 773	if (priv->uccf) {
 774		ucc_fast_free(priv->uccf);
 775		priv->uccf = NULL;
 776	}
 777
 778	if (priv->rx_buffer) {
 779		dma_free_coherent(priv->dev,
 780				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
 781				  priv->rx_buffer, priv->dma_rx_addr);
 782		priv->rx_buffer = NULL;
 783		priv->dma_rx_addr = 0;
 784	}
 785
 786	if (priv->tx_buffer) {
 787		dma_free_coherent(priv->dev,
 788				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
 789				  priv->tx_buffer, priv->dma_tx_addr);
 790		priv->tx_buffer = NULL;
 791		priv->dma_tx_addr = 0;
 792	}
 793}
 794
 795static int uhdlc_close(struct net_device *dev)
 796{
 797	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
 798	struct ucc_tdm *utdm = priv->utdm;
 799	u32 cecr_subblock;
 800
 801	napi_disable(&priv->napi);
 802	cecr_subblock = ucc_fast_get_qe_cr_subblock(
 803				priv->ut_info->uf_info.ucc_num);
 804
 805	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
 806		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 807	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
 808		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 809
 810	if (priv->tsa)
 811		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
 812
 813	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 814
 815	free_irq(priv->ut_info->uf_info.irq, priv);
 816	netif_stop_queue(dev);
 817	netdev_reset_queue(dev);
 818	priv->hdlc_busy = 0;
 819
 820	return 0;
 821}
 822
 823static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
 824			   unsigned short parity)
 825{
 826	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
 827
 828	if (encoding != ENCODING_NRZ &&
 829	    encoding != ENCODING_NRZI)
 830		return -EINVAL;
 831
 832	if (parity != PARITY_NONE &&
 833	    parity != PARITY_CRC32_PR1_CCITT &&
 834	    parity != PARITY_CRC16_PR0_CCITT &&
 835	    parity != PARITY_CRC16_PR1_CCITT)
 836		return -EINVAL;
 837
 838	priv->encoding = encoding;
 839	priv->parity = parity;
 840
 841	return 0;
 842}
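/* ucc_hdlc_attach() is invoked by the generic HDLC core when user space
 * selects the raw HDLC protocol over SIOCWANDEV. A rough sketch of that
 * request (sock_fd is a placeholder; raw_hdlc_proto comes from
 * <linux/hdlc/ioctl.h>), choosing a combination the checks above accept:
 *
 *	raw_hdlc_proto proto = {
 *		.encoding = ENCODING_NRZI,
 *		.parity   = PARITY_CRC16_PR1_CCITT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
 *	ifr.ifr_settings.type = IF_PROTO_HDLC;
 *	ifr.ifr_settings.size = sizeof(proto);
 *	ifr.ifr_settings.ifs_ifsu.raw_hdlc = &proto;
 *	ioctl(sock_fd, SIOCWANDEV, &ifr);
 */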
 843
 844#ifdef CONFIG_PM
 845static void store_clk_config(struct ucc_hdlc_private *priv)
 846{
 847	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
 848
 849	/* store si clk */
 850	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
 851	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
 852
 853	/* store si sync */
 854	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
 855
 856	/* store ucc clk */
 857	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
 858}
 859
 860static void resume_clk_config(struct ucc_hdlc_private *priv)
 861{
 862	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
 863
 864	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
 865
 866	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
 867	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
 868
 869	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
 870}
 871
 872static int uhdlc_suspend(struct device *dev)
 873{
 874	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
 875	struct ucc_tdm_info *ut_info;
 876	struct ucc_fast __iomem *uf_regs;
 877
 878	if (!priv)
 879		return -EINVAL;
 880
 881	if (!netif_running(priv->ndev))
 882		return 0;
 883
 884	netif_device_detach(priv->ndev);
 885	napi_disable(&priv->napi);
 886
 887	ut_info = priv->ut_info;
 888	uf_regs = priv->uf_regs;
 889
	/* back up the gumr and guemr registers */
 891	priv->gumr = ioread32be(&uf_regs->gumr);
 892	priv->guemr = ioread8(&uf_regs->guemr);
 893
 894	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
 895					GFP_KERNEL);
 896	if (!priv->ucc_pram_bak)
 897		return -ENOMEM;
 898
	/* back up the HDLC parameter RAM */
 900	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
 901		      sizeof(struct ucc_hdlc_param));
 902
 903	/* store the clk configuration */
 904	store_clk_config(priv);
 905
 906	/* save power */
 907	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 908
 909	return 0;
 910}
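/* Rationale for the backups above: a deep sleep state may power down the
 * QE block, so anything living in the UCC registers, the clock mux, or
 * the parameter RAM cannot be assumed to survive; uhdlc_resume() below
 * rebuilds all of it from these copies.
 */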
 911
 912static int uhdlc_resume(struct device *dev)
 913{
 914	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
 915	struct ucc_tdm *utdm;
 916	struct ucc_tdm_info *ut_info;
 917	struct ucc_fast __iomem *uf_regs;
 918	struct ucc_fast_private *uccf;
 919	struct ucc_fast_info *uf_info;
 920	int ret, i;
 921	u32 cecr_subblock;
 922	u16 bd_status;
 923
 924	if (!priv)
 925		return -EINVAL;
 926
 927	if (!netif_running(priv->ndev))
 928		return 0;
 929
 930	utdm = priv->utdm;
 931	ut_info = priv->ut_info;
 932	uf_info = &ut_info->uf_info;
 933	uf_regs = priv->uf_regs;
 934	uccf = priv->uccf;
 935
 936	/* restore gumr guemr */
 937	iowrite8(priv->guemr, &uf_regs->guemr);
 938	iowrite32be(priv->gumr, &uf_regs->gumr);
 939
 940	/* Set Virtual Fifo registers */
 941	iowrite16be(uf_info->urfs, &uf_regs->urfs);
 942	iowrite16be(uf_info->urfet, &uf_regs->urfet);
 943	iowrite16be(uf_info->urfset, &uf_regs->urfset);
 944	iowrite16be(uf_info->utfs, &uf_regs->utfs);
 945	iowrite16be(uf_info->utfet, &uf_regs->utfet);
 946	iowrite16be(uf_info->utftt, &uf_regs->utftt);
 947	/* utfb, urfb are offsets from MURAM base */
 948	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
 949	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
 950
 951	/* Rx Tx and sync clock routing */
 952	resume_clk_config(priv);
 953
 954	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 955	iowrite32be(0xffffffff, &uf_regs->ucce);
 956
 957	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 958
 959	/* rebuild SIRAM */
 960	if (priv->tsa)
 961		ucc_tdm_init(priv->utdm, priv->ut_info);
 962
	/* Issue a STOP_TX command to the UCCx channel through the QE CECR */
 964	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 965	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
 966			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 967
 968	/* Set UPSMR normal mode */
 969	iowrite32be(0, &uf_regs->upsmr);
 970
 971	/* init parameter base */
 972	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 973	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
 974			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 975
 976	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 977				qe_muram_addr(priv->ucc_pram_offset);
 978
 979	/* restore ucc parameter */
 980	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
 981		    sizeof(struct ucc_hdlc_param));
 982	kfree(priv->ucc_pram_bak);
 983
	/* Rebuild the RX and TX buffer descriptor rings */
 985	for (i = 0; i < RX_BD_RING_LEN; i++) {
 986		if (i < (RX_BD_RING_LEN - 1))
 987			bd_status = R_E_S | R_I_S;
 988		else
 989			bd_status = R_E_S | R_I_S | R_W_S;
 990
 991		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
 992		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
 993			    &priv->rx_bd_base[i].buf);
 994	}
 995
 996	for (i = 0; i < TX_BD_RING_LEN; i++) {
 997		if (i < (TX_BD_RING_LEN - 1))
 998			bd_status =  T_I_S | T_TC_S;
 999		else
1000			bd_status =  T_I_S | T_TC_S | T_W_S;
1001
1002		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
1003		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
1004			    &priv->tx_bd_base[i].buf);
1005	}
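/* Shape of the rings rebuilt above: each BD i points at a fixed slice of
 * the coherent buffer, and only the last BD carries the wrap bit, so the
 * QE walks the ring circularly:
 *
 *	BD[0]                 R_E_S | R_I_S         -> dma_rx_addr
 *	BD[1]                 R_E_S | R_I_S         -> dma_rx_addr + MAX_RX_BUF_LENGTH
 *	...
 *	BD[RX_BD_RING_LEN-1]  R_E_S | R_I_S | R_W_S -> wraps back to BD[0]
 *
 * (the TX ring is analogous, with T_I_S, T_TC_S and T_W_S)
 */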
1006
	/* If the controller was active before suspend, re-enable TX and RX */
1008	if (priv->hdlc_busy == 1) {
1009		cecr_subblock = ucc_fast_get_qe_cr_subblock(
1010					priv->ut_info->uf_info.ucc_num);
1011
1012		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
1013			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
1014
1015		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
1016
1017		/* Enable the TDM port */
1018		if (priv->tsa)
1019			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
1020	}
1021
1022	napi_enable(&priv->napi);
1023	netif_device_attach(priv->ndev);
1024
1025	return 0;
1026}
1027
1028static const struct dev_pm_ops uhdlc_pm_ops = {
1029	.suspend = uhdlc_suspend,
1030	.resume = uhdlc_resume,
1031	.freeze = uhdlc_suspend,
1032	.thaw = uhdlc_resume,
1033};
1034
1035#define HDLC_PM_OPS (&uhdlc_pm_ops)
1036
1037#else
1038
1039#define HDLC_PM_OPS NULL
1040
#endif

1042static void uhdlc_tx_timeout(struct net_device *ndev)
1043{
1044	netdev_err(ndev, "%s\n", __func__);
1045}
1046
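/* The generic hdlc_start_xmit() below simply forwards each skb to the
 * hdlc->xmit hook, which ucc_hdlc_probe() points at this driver's
 * ucc_hdlc_tx(); only open/stop/ioctl/timeout need driver-specific ops.
 */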
1047static const struct net_device_ops uhdlc_ops = {
1048	.ndo_open       = uhdlc_open,
1049	.ndo_stop       = uhdlc_close,
1050	.ndo_start_xmit = hdlc_start_xmit,
1051	.ndo_do_ioctl   = uhdlc_ioctl,
1052	.ndo_tx_timeout	= uhdlc_tx_timeout,
1053};
1054
1055static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
1056{
1057	struct device_node *np;
1058	struct platform_device *pdev;
1059	struct resource *res;
1060	static int siram_init_flag;
1061	int ret = 0;
1062
1063	np = of_find_compatible_node(NULL, NULL, name);
1064	if (!np)
1065		return -EINVAL;
1066
1067	pdev = of_find_device_by_node(np);
1068	if (!pdev) {
1069		pr_err("%pOFn: failed to lookup pdev\n", np);
1070		of_node_put(np);
1071		return -EINVAL;
1072	}
1073
1074	of_node_put(np);
1075	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1076	if (!res) {
1077		ret = -EINVAL;
1078		goto error_put_device;
1079	}
1080	*ptr = ioremap(res->start, resource_size(res));
1081	if (!*ptr) {
1082		ret = -ENOMEM;
1083		goto error_put_device;
1084	}
1085
1086	/* We've remapped the addresses, and we don't need the device any
1087	 * more, so we should release it.
1088	 */
1089	put_device(&pdev->dev);
1090
1091	if (init_flag && siram_init_flag == 0) {
1092		memset_io(*ptr, 0, resource_size(res));
1093		siram_init_flag = 1;
1094	}
	return 0;
1096
1097error_put_device:
1098	put_device(&pdev->dev);
1099
1100	return ret;
1101}
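/* For reference, a device-tree node exercising the properties parsed in
 * ucc_hdlc_probe() below could look roughly like this; every value here
 * is illustrative, not taken from a real board file, and a TDM-attached
 * node needs the additional properties consumed by ucc_of_parse_tdm():
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,hmask = /bits/ 16 <0x0000>;
 *	};
 */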
1102
1103static int ucc_hdlc_probe(struct platform_device *pdev)
1104{
1105	struct device_node *np = pdev->dev.of_node;
1106	struct ucc_hdlc_private *uhdlc_priv = NULL;
1107	struct ucc_tdm_info *ut_info;
1108	struct ucc_tdm *utdm = NULL;
1109	struct resource res;
1110	struct net_device *dev;
1111	hdlc_device *hdlc;
1112	int ucc_num;
1113	const char *sprop;
1114	int ret;
1115	u32 val;
1116
1117	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1118	if (ret) {
1119		dev_err(&pdev->dev, "Invalid ucc property\n");
1120		return -ENODEV;
1121	}
1122
1123	ucc_num = val - 1;
1124	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
1126		return -EINVAL;
1127	}
1128
1129	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1130	       sizeof(utdm_primary_info));
1131
1132	ut_info = &utdm_info[ucc_num];
1133	ut_info->uf_info.ucc_num = ucc_num;
1134
1135	sprop = of_get_property(np, "rx-clock-name", NULL);
1136	if (sprop) {
1137		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1138		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1139		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
1140			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1141			return -EINVAL;
1142		}
1143	} else {
1144		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1145		return -EINVAL;
1146	}
1147
1148	sprop = of_get_property(np, "tx-clock-name", NULL);
1149	if (sprop) {
1150		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1151		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1152		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
1153			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1154			return -EINVAL;
1155		}
1156	} else {
1157		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1158		return -EINVAL;
1159	}
1160
1161	ret = of_address_to_resource(np, 0, &res);
1162	if (ret)
1163		return -EINVAL;
1164
1165	ut_info->uf_info.regs = res.start;
1166	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1167
1168	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;
1172
1173	dev_set_drvdata(&pdev->dev, uhdlc_priv);
1174	uhdlc_priv->dev = &pdev->dev;
1175	uhdlc_priv->ut_info = ut_info;
1176
1177	if (of_get_property(np, "fsl,tdm-interface", NULL))
1178		uhdlc_priv->tsa = 1;
1179
1180	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1181		uhdlc_priv->loopback = 1;
1182
1183	if (of_get_property(np, "fsl,hdlc-bus", NULL))
1184		uhdlc_priv->hdlc_bus = 1;
1185
1186	if (uhdlc_priv->tsa == 1) {
1187		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1188		if (!utdm) {
1189			ret = -ENOMEM;
1190			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1191			goto free_uhdlc_priv;
1192		}
1193		uhdlc_priv->utdm = utdm;
1194		ret = ucc_of_parse_tdm(np, utdm, ut_info);
1195		if (ret)
1196			goto free_utdm;
1197
1198		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
1199				     (void __iomem **)&utdm->si_regs);
1200		if (ret)
1201			goto free_utdm;
1202		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1203				     (void __iomem **)&utdm->siram);
1204		if (ret)
1205			goto unmap_si_regs;
1206	}
1207
1208	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
1209		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
1210
1211	ret = uhdlc_init(uhdlc_priv);
1212	if (ret) {
1213		dev_err(&pdev->dev, "Failed to init uhdlc\n");
1214		goto undo_uhdlc_init;
1215	}
1216
1217	dev = alloc_hdlcdev(uhdlc_priv);
1218	if (!dev) {
1219		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate hdlc device\n");
1221		goto undo_uhdlc_init;
1222	}
1223
1224	uhdlc_priv->ndev = dev;
1225	hdlc = dev_to_hdlc(dev);
1226	dev->tx_queue_len = 16;
1227	dev->netdev_ops = &uhdlc_ops;
1228	dev->watchdog_timeo = 2 * HZ;
1229	hdlc->attach = ucc_hdlc_attach;
1230	hdlc->xmit = ucc_hdlc_tx;
1231	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1232	if (register_hdlc_device(dev)) {
1233		ret = -ENOBUFS;
1234		pr_err("ucc_hdlc: unable to register hdlc device\n");
1235		goto free_dev;
1236	}
1237
1238	return 0;
1239
free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	/* utdm is only allocated and mapped when the TDM interface is used;
	 * guard the unmaps so the non-TSA error path cannot dereference NULL.
	 */
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
1252}
1253
1254static int ucc_hdlc_remove(struct platform_device *pdev)
1255{
1256	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1257
1258	uhdlc_memclean(priv);
1259
	/* The TDM state only exists when the device was probed with
	 * fsl,tdm-interface; guard against a NULL utdm, and free it here
	 * since it was allocated in probe.
	 */
	if (priv->utdm) {
		if (priv->utdm->si_regs) {
			iounmap(priv->utdm->si_regs);
			priv->utdm->si_regs = NULL;
		}
		if (priv->utdm->siram) {
			iounmap(priv->utdm->siram);
			priv->utdm->siram = NULL;
		}
		kfree(priv->utdm);
	}
	kfree(priv);
1270
1271	dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1272
1273	return 0;
1274}
1275
1276static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1277	{
1278	.compatible = "fsl,ucc-hdlc",
1279	},
1280	{},
1281};
1282
1283MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1284
1285static struct platform_driver ucc_hdlc_driver = {
1286	.probe	= ucc_hdlc_probe,
1287	.remove	= ucc_hdlc_remove,
1288	.driver	= {
1289		.name		= DRV_NAME,
1290		.pm		= HDLC_PM_OPS,
1291		.of_match_table	= fsl_ucc_hdlc_of_match,
1292	},
1293};
1294
1295module_platform_driver(ucc_hdlc_driver);
1296MODULE_LICENSE("GPL");
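/* Export the driver description string already defined at the top of the
 * file as standard module metadata.
 */
MODULE_DESCRIPTION(DRV_DESC);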