/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

/* Number of MAL instances probed so far, used to assign mal->index */
static int mal_count;

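/*
 * Register a commac (an EMAC channel user) with this MAL instance and
 * claim its TX/RX channel masks.  An illustrative caller sketch (the
 * dev->mal and dev->commac names follow the EMAC driver's conventions
 * and are assumptions here, not defined in this file):
 *
 *	if (mal_register_commac(dev->mal, &dev->commac))
 *		return -EBUSY;
 *
 * Returns 0 on success, or -EBUSY if another commac already owns one of
 * the requested channels.
 */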
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

void mal_unregister_commac(struct mal_instance	*mal,
		struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

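/*
 * Program the RX channel buffer size.  The MAL_RCBS register stores the
 * size divided by 16, so the size must be a multiple of 16: e.g. a
 * 1536-byte buffer is written as 1536 >> 4 == 96.
 */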
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

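/*
 * Buffer descriptor rings live in one contiguous DMA region: all TX
 * rings (num_tx_chans * NUM_TX_BUFF descriptors) come first, followed
 * by all RX rings.  These helpers return a channel's offset into that
 * region, in descriptors.
 */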
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but MAL_RXCASR expects the value divided by 8
	 * for its bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but MAL_RXCARR expects the value divided by 8
	 * for its bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore descriptor errors here; a TXDE or
			 * RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably either buggy hardware
			 * or an incorrect physical address in a BD (i.e.
			 * a driver bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably either buggy hardware or an
		 * incorrect EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

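/*
 * Hand RX/TX completion work off to NAPI.  End-of-buffer interrupts are
 * masked (under mal->lock, to avoid racing with the RXDE path) before
 * the poller is scheduled, and re-enabled by mal_poll() once the poll
 * list has been drained.
 */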
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		spin_lock(&mal->lock);
		mal_disable_eob_irq(mal);
		spin_unlock(&mal->lock);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller may disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

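/*
 * NAPI poll handler.  Per the NAPI contract, it consumes up to @budget
 * RX packets across all registered commacs and returns the number
 * actually processed; only when it finishes under budget does it
 * complete the poll and re-enable the EOB interrupt.
 */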
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something smarter here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (!napi_reschedule(napi))
				goto more_work;

			spin_lock_irqsave(&mal->lock, flags);
			mal_disable_eob_irq(mal);
			spin_unlock_irqrestore(&mal->lock, flags);
			goto again;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

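/*
 * Soft-reset the MAL core.  Setting MAL_CFG_SR triggers the reset; the
 * hardware clears the bit again once the reset has completed.
 */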
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
	    sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

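/*
 * Probe one MAL instance described in the device tree: read the channel
 * counts, map the DCR range and interrupts, reset and configure the
 * core, allocate the shared buffer-descriptor region, and hook up the
 * IRQ handlers before advertising the instance via drvdata.
 */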
static int mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal)
		return -ENOMEM;

	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs!\n", index);
		err = -ENODEV;
		goto fail;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node->full_name);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
	    !mal->txde_irq  || !mal->rxde_irq) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts!\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* The current Axon is not happy with a non-0 priority (it can
	 * deadlock), so fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
					   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	platform_set_drvdata(ofdev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}

static int mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = platform_get_drvdata(ofdev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list))
		/* This is *very* bad */
		WARN(1, KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}

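/*
 * For illustration, a MAL device-tree node typically looks like the
 * following (a sketch; the exact dcr-reg range, channel counts and
 * interrupt specifiers are board specific and assumed here):
 *
 *	MAL0: mcmal {
 *		compatible = "ibm,mcmal2", "ibm,mcmal";
 *		dcr-reg = <0x180 0x062>;
 *		num-tx-chans = <2>;
 *		num-rx-chans = <16>;
 *		interrupt-parent = <&UIC0>;
 *		interrupts = <...>;
 *	};
 */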
static const struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};

static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}