/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

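/* The core supports two descriptor formats: a 32-bit engine (also used
 * for 30-bit addressing) and a 64-bit engine.  Each format gets its own
 * set of accessors, collected in a b43legacy_dma_ops vtable that is
 * selected per ring at setup time in b43legacy_setup_dmaring(). */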
/* 32bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op32_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

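/* The hardware's TX/RX index registers take byte offsets into the
 * descriptor table rather than slot numbers, hence the scaling by the
 * descriptor size below. */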
static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static const struct b43legacy_dma_ops dma32_ops = {
	.idx2desc		= op32_idx2desc,
	.fill_descriptor	= op32_fill_descriptor,
	.poke_tx		= op32_poke_tx,
	.tx_suspend		= op32_tx_suspend,
	.tx_resume		= op32_tx_resume,
	.get_current_rxslot	= op32_get_current_rxslot,
	.set_current_rxslot	= op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op64_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta
					**meta)
{
	struct b43legacy_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0;
	u32 ctl1 = 0;
	u32 addrlo;
	u32 addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ring->dev->dma.translation;
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& B43legacy_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
		& B43legacy_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static void op64_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    | B43legacy_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    & ~B43legacy_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
	val &= B43legacy_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43legacy_dmadesc64));
}

static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static const struct b43legacy_dma_ops dma64_ops = {
	.idx2desc		= op64_idx2desc,
	.fill_descriptor	= op64_fill_descriptor,
	.poke_tx		= op64_poke_tx,
	.tx_suspend		= op64_tx_suspend,
	.tx_resume		= op64_tx_resume,
	.get_current_rxslot	= op64_get_current_rxslot,
	.set_current_rxslot	= op64_set_current_rxslot,
};


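/* Ring accounting helpers.  The rings are circular: these implement the
 * wrap-around slot arithmetic and free-slot bookkeeping used by the TX
 * and RX paths below. */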
static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX",
		       ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
return 0;

	return idx_to_prio[ring->index];
}


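/* Each of the six DMA controllers has its own block of MMIO registers.
 * Map a controller index to the base offset of that block; the 64-bit
 * and 32-bit engines live at different offsets. */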
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map64[] = {
		B43legacy_MMIO_DMA64_BASE0,
		B43legacy_MMIO_DMA64_BASE1,
		B43legacy_MMIO_DMA64_BASE2,
		B43legacy_MMIO_DMA64_BASE3,
		B43legacy_MMIO_DMA64_BASE4,
		B43legacy_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	if (type == B43legacy_DMA_64BIT) {
		B43legacy_WARN_ON(!(controller_idx >= 0 &&
				  controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			  controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

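/* The descriptor ring itself lives in coherent DMA memory, so CPU and
 * device always see a consistent view of the descriptors.  The packet
 * buffers hanging off the descriptors use streaming mappings instead
 * (see map_descbuffer()/unmap_descbuffer() above). */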
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
			     " failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43legacy_DMA_64BIT) ?
		 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_RXSTAT;
			if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_RXSTATE;
			if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
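/* Note the two-phase sequence below: first poll until any in-flight DMA
 * has settled into a stable state (DISABLED, IDLEWAIT or STOPPED), only
 * then clear TXCTL, and finally poll for the engine to report DISABLED. */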
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
			    value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
			    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
						 B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
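/* The 30-bit engine can only address the first 1GB (and the 32-bit
 * engine the first 4GB) of DMA space, so a mapping that extends past
 * that limit must be rejected; callers then retry the allocation with
 * GFP_DMA or a bounce buffer. */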
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					 dma_addr_t addr,
					 size_t buffersize,
					 bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43legacy_DMA_64BIT:
		/* Currently we can't have addresses beyond 64 bits in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

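/* Set up a single RX descriptor buffer: allocate an skb, DMA-map it
 * (retrying from ZONE_DMA if the first mapping is out of range) and
 * zero the frame_len/cookie fields, so the RX path can later detect
 * when the device has actually written into the buffer. */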
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc_generic *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;

	if (ring->tx) {
		if (ring->type == B43legacy_DMA_64BIT) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43legacy_DMA64_TXENABLE;
			value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT)
				& B43legacy_DMA64_TXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
					    (ringbase & 0xFFFFFFFF));
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
					    ((ringbase >> 32)
					    & ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43legacy_DMA32_TXENABLE;
			value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
				& B43legacy_DMA32_TXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
					    (ringbase &
					    ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43legacy_DMA_64BIT) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset <<
				 B43legacy_DMA64_RXFROFF_SHIFT);
			value |= B43legacy_DMA64_RXENABLE;
			value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT)
				& B43legacy_DMA64_RXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO,
					    (ringbase & 0xFFFFFFFF));
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
					    ((ringbase >> 32) &
					    ~SSB_DMA_TRANSLATION_MASK) |
					    trans);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
					    200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset <<
				 B43legacy_DMA32_RXFROFF_SHIFT);
			value |= B43legacy_DMA32_RXENABLE;
			value |= (addrext <<
				 B43legacy_DMA32_RXADDREXT_SHIFT)
				 & B43legacy_DMA32_RXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
					    (ringbase &
					    ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
			b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
					    200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		if (ring->type == B43legacy_DMA_64BIT) {
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		if (ring->type == B43legacy_DMA_64BIT) {
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43legacy_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			mmio_base + B43legacy_DMA32_TXCTL,
			B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

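/* supported_dma_mask() above probes the engine type: if the core's
 * TMSHIGH register advertises 64-bit DMA, use that; otherwise write the
 * ADDREXT mask to a 32-bit TXCTL register and read it back -- if the
 * bits stick, the engine supports 32-bit addressing, else it is limited
 * to 30 bits. */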
static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43legacy_DMA_64BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43legacy_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			"bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

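/* Example: a frame queued on tx_ring1, slot 2, gets cookie 0xB002.  The
 * hardware TX status report carries this value back, and parse_cookie()
 * below uses it to recover the ring and slot. */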
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				      u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

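/* Transmit one frame.  Each frame occupies SLOTS_PER_PACKET (two)
 * descriptor slots: the first carries the firmware TX header from
 * txhdr_cache, the second the frame payload itself. */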
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	const struct b43legacy_dma_ops *ops;
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43legacy_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the actual retry count performed by
			 * the hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
			       ring->index);
	}

	spin_unlock(&ring->lock);
}

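/* Process one received buffer.  Ring 3 carries hardware TX status
 * reports instead of frames; for normal frames the length field written
 * by the firmware is polled briefly, since the descriptor can complete
 * slightly before the header is filled in. */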
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
v3.5.6
   1/*
   2
   3  Broadcom B43legacy wireless driver
   4
   5  DMA ringbuffer and descriptor allocation/management
   6
   7  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
   8
   9  Some code in this file is derived from the b44.c driver
  10  Copyright (C) 2002 David S. Miller
  11  Copyright (C) Pekka Pietikainen
  12
  13  This program is free software; you can redistribute it and/or modify
  14  it under the terms of the GNU General Public License as published by
  15  the Free Software Foundation; either version 2 of the License, or
  16  (at your option) any later version.
  17
  18  This program is distributed in the hope that it will be useful,
  19  but WITHOUT ANY WARRANTY; without even the implied warranty of
  20  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21  GNU General Public License for more details.
  22
  23  You should have received a copy of the GNU General Public License
  24  along with this program; see the file COPYING.  If not, write to
  25  the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
  26  Boston, MA 02110-1301, USA.
  27
  28*/
  29
  30#include "b43legacy.h"
  31#include "dma.h"
  32#include "main.h"
  33#include "debugfs.h"
  34#include "xmit.h"
  35
  36#include <linux/dma-mapping.h>
  37#include <linux/pci.h>
  38#include <linux/delay.h>
  39#include <linux/skbuff.h>
  40#include <linux/slab.h>
  41#include <net/dst.h>
  42
  43/* 32bit DMA ops. */
  44static
  45struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
  46					  int slot,
  47					  struct b43legacy_dmadesc_meta **meta)
 
  48{
  49	struct b43legacy_dmadesc32 *desc;
  50
  51	*meta = &(ring->meta[slot]);
  52	desc = ring->descbase;
  53	desc = &(desc[slot]);
  54
  55	return (struct b43legacy_dmadesc32 *)desc;
  56}
  57
  58static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
  59				 struct b43legacy_dmadesc32 *desc,
  60				 dma_addr_t dmaaddr, u16 bufsize,
  61				 int start, int end, int irq)
  62{
  63	struct b43legacy_dmadesc32 *descbase = ring->descbase;
  64	int slot;
  65	u32 ctl;
  66	u32 addr;
  67	u32 addrext;
  68
  69	slot = (int)(desc - descbase);
  70	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
  71
  72	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
  73	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
  74		   >> SSB_DMA_TRANSLATION_SHIFT;
  75	addr |= ring->dev->dma.translation;
  76	ctl = (bufsize - ring->frameoffset)
  77	      & B43legacy_DMA32_DCTL_BYTECNT;
  78	if (slot == ring->nr_slots - 1)
  79		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
  80	if (start)
  81		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
  82	if (end)
  83		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
  84	if (irq)
  85		ctl |= B43legacy_DMA32_DCTL_IRQ;
  86	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
  87	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;
  88
  89	desc->control = cpu_to_le32(ctl);
  90	desc->address = cpu_to_le32(addr);
  91}
  92
  93static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
  94{
  95	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
  96			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
  97}
  98
  99static void op32_tx_suspend(struct b43legacy_dmaring *ring)
 100{
 101	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
 102			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
 103			    | B43legacy_DMA32_TXSUSPEND);
 104}
 105
 106static void op32_tx_resume(struct b43legacy_dmaring *ring)
 107{
 108	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
 109			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
 110			    & ~B43legacy_DMA32_TXSUSPEND);
 111}
 112
 113static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
 114{
 115	u32 val;
 116
 117	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
 118	val &= B43legacy_DMA32_RXDPTR;
 119
 120	return (val / sizeof(struct b43legacy_dmadesc32));
 121}
 122
 123static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
 124				    int slot)
 125{
 126	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
 127			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
 128}
 129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 130static inline int free_slots(struct b43legacy_dmaring *ring)
 131{
 132	return (ring->nr_slots - ring->used_slots);
 133}
 134
 135static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
 136{
 137	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
 138	if (slot == ring->nr_slots - 1)
 139		return 0;
 140	return slot + 1;
 141}
 142
 143static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
 144{
 145	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
 146	if (slot == 0)
 147		return ring->nr_slots - 1;
 148	return slot - 1;
 149}
 150
 151#ifdef CONFIG_B43LEGACY_DEBUG
 152static void update_max_used_slots(struct b43legacy_dmaring *ring,
 153				  int current_used_slots)
 154{
 155	if (current_used_slots <= ring->max_used_slots)
 156		return;
 157	ring->max_used_slots = current_used_slots;
 158	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
 159		b43legacydbg(ring->dev->wl,
 160		       "max_used_slots increased to %d on %s ring %d\n",
 161		       ring->max_used_slots,
 162		       ring->tx ? "TX" : "RX",
 163		       ring->index);
 164}
 165#else
 166static inline
 167void update_max_used_slots(struct b43legacy_dmaring *ring,
 168			   int current_used_slots)
 169{ }
 170#endif /* DEBUG */
 171
 172/* Request a slot for usage. */
 173static inline
 174int request_slot(struct b43legacy_dmaring *ring)
 175{
 176	int slot;
 177
 178	B43legacy_WARN_ON(!ring->tx);
 179	B43legacy_WARN_ON(ring->stopped);
 180	B43legacy_WARN_ON(free_slots(ring) == 0);
 181
 182	slot = next_slot(ring, ring->current_slot);
 183	ring->current_slot = slot;
 184	ring->used_slots++;
 185
 186	update_max_used_slots(ring, ring->used_slots);
 187
 188	return slot;
 189}
 190
 191/* Mac80211-queue to b43legacy-ring mapping */
 192static struct b43legacy_dmaring *priority_to_txring(
 193						struct b43legacy_wldev *dev,
 194						int queue_priority)
 195{
 196	struct b43legacy_dmaring *ring;
 197
 198	/* FIXME: For now we always run on TX-ring-1 */
 199	return dev->dma.tx_ring1;
 200
 201	/* 0 = highest priority */
 202	switch (queue_priority) {
 203	default:
 204		B43legacy_WARN_ON(1);
 205		/* fallthrough */
 206	case 0:
 207		ring = dev->dma.tx_ring3;
 208		break;
 209	case 1:
 210		ring = dev->dma.tx_ring2;
 211		break;
 212	case 2:
 213		ring = dev->dma.tx_ring1;
 214		break;
 215	case 3:
 216		ring = dev->dma.tx_ring0;
 217		break;
 218	case 4:
 219		ring = dev->dma.tx_ring4;
 220		break;
 221	case 5:
 222		ring = dev->dma.tx_ring5;
 223		break;
 224	}
 225
 226	return ring;
 227}
 228
 229/* Bcm4301-ring to mac80211-queue mapping */
 230static inline int txring_to_priority(struct b43legacy_dmaring *ring)
 231{
 232	static const u8 idx_to_prio[] =
 233		{ 3, 2, 1, 0, 4, 5, };
 234
 235	/* FIXME: have only one queue, for now */
 236	return 0;
 237
 238	return idx_to_prio[ring->index];
 239}
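    /*
     * Illustrative round trip (once the FIXMEs above are resolved):
     * mac80211 priority 0 (highest) selects tx_ring3 in priority_to_txring(),
     * and idx_to_prio[3] == 0 maps ring index 3 back to priority 0, i.e.
     * the table is the inverse of the switch statement.
     */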
 240
 241
 242static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
 243					int controller_idx)
 244{
 245	static const u16 map32[] = {
 246		B43legacy_MMIO_DMA32_BASE0,
 247		B43legacy_MMIO_DMA32_BASE1,
 248		B43legacy_MMIO_DMA32_BASE2,
 249		B43legacy_MMIO_DMA32_BASE3,
 250		B43legacy_MMIO_DMA32_BASE4,
 251		B43legacy_MMIO_DMA32_BASE5,
 252	};
 253
 254	B43legacy_WARN_ON(!(controller_idx >= 0 &&
 255			  controller_idx < ARRAY_SIZE(map32)));
 256	return map32[controller_idx];
 257}
 258
 259static inline
 260dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
 261			  unsigned char *buf,
 262			  size_t len,
 263			  int tx)
 264{
 265	dma_addr_t dmaaddr;
 266
 267	if (tx)
 268		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 269					     buf, len,
 270					     DMA_TO_DEVICE);
 271	else
 272		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 273					     buf, len,
 274					     DMA_FROM_DEVICE);
 275
 276	return dmaaddr;
 277}
 278
 279static inline
 280void unmap_descbuffer(struct b43legacy_dmaring *ring,
 281		      dma_addr_t addr,
 282		      size_t len,
 283		      int tx)
 284{
 285	if (tx)
 286		dma_unmap_single(ring->dev->dev->dma_dev,
 287				     addr, len,
 288				     DMA_TO_DEVICE);
 289	else
 290		dma_unmap_single(ring->dev->dev->dma_dev,
 291				     addr, len,
 292				     DMA_FROM_DEVICE);
 293}
 294
 295static inline
 296void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 297			     dma_addr_t addr,
 298			     size_t len)
 299{
 300	B43legacy_WARN_ON(ring->tx);
 301
 302	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 303				addr, len, DMA_FROM_DEVICE);
 304}
 305
 306static inline
 307void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 308				dma_addr_t addr,
 309				size_t len)
 310{
 311	B43legacy_WARN_ON(ring->tx);
 312
 313	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 314				   addr, len, DMA_FROM_DEVICE);
 315}
 316
 317static inline
 318void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 319			    struct b43legacy_dmadesc_meta *meta,
 320			    int irq_context)
 321{
 322	if (meta->skb) {
 323		if (irq_context)
 324			dev_kfree_skb_irq(meta->skb);
 325		else
 326			dev_kfree_skb(meta->skb);
 327		meta->skb = NULL;
 328	}
 329}
 330
 331static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 332{
 333	/* GFP flags must match the flags in free_ringmemory()! */
 334	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
 335					    B43legacy_DMA_RINGMEMSIZE,
 336					    &(ring->dmabase),
 337					    GFP_KERNEL);
 338	if (!ring->descbase) {
 339		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
 340			     " failed\n");
 341		return -ENOMEM;
 342	}
 343	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
 344
 345	return 0;
 346}
 347
 348static void free_ringmemory(struct b43legacy_dmaring *ring)
 349{
 350	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
 351			  ring->descbase, ring->dmabase);
 352}
 353
 354/* Reset the RX DMA channel */
 355static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
 356					    u16 mmio_base,
 357					    enum b43legacy_dmatype type)
 358{
 359	int i;
 360	u32 value;
 361	u16 offset;
 362
 363	might_sleep();
 364
 365	offset = B43legacy_DMA32_RXCTL;
 366	b43legacy_write32(dev, mmio_base + offset, 0);
 367	for (i = 0; i < 10; i++) {
 368		offset = B43legacy_DMA32_RXSTATUS;
 369		value = b43legacy_read32(dev, mmio_base + offset);
 370		value &= B43legacy_DMA32_RXSTATE;
 371		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
 372			i = -1;
 373			break;
 374		}
 375		msleep(1);
 376	}
 377	if (i != -1) {
 378		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
 379		return -ENODEV;
 380	}
 381
 382	return 0;
 383}
 384
 385/* Reset the TX DMA channel */
 386static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 387					    u16 mmio_base,
 388					    enum b43legacy_dmatype type)
 389{
 390	int i;
 391	u32 value;
 392	u16 offset;
 393
 394	might_sleep();
 395
 396	for (i = 0; i < 10; i++) {
 397		offset = B43legacy_DMA32_TXSTATUS;
 398		value = b43legacy_read32(dev, mmio_base + offset);
 399		value &= B43legacy_DMA32_TXSTATE;
 400		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
 401		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
 402		    value == B43legacy_DMA32_TXSTAT_STOPPED)
 403			break;
 404		msleep(1);
 405	}
 406	offset = B43legacy_DMA32_TXCTL;
 407	b43legacy_write32(dev, mmio_base + offset, 0);
 408	for (i = 0; i < 10; i++) {
 409		offset = B43legacy_DMA32_TXSTATUS;
 410		value = b43legacy_read32(dev, mmio_base + offset);
 411		value &= B43legacy_DMA32_TXSTATE;
 412		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
 413			i = -1;
 414			break;
 415		}
 416		msleep(1);
 417	}
 418	if (i != -1) {
 419		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
 420		return -ENODEV;
 421	}
 422	/* ensure the reset is completed. */
 423	msleep(1);
 424
 425	return 0;
 426}
 427
 428/* Check if a DMA mapping address is invalid. */
 429static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 430					 dma_addr_t addr,
 431					 size_t buffersize,
 432					 bool dma_to_device)
 433{
 434	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 435		return 1;
 436
 437	switch (ring->type) {
 438	case B43legacy_DMA_30BIT:
 439		if ((u64)addr + buffersize > (1ULL << 30))
 440			goto address_error;
 441		break;
 442	case B43legacy_DMA_32BIT:
 443		if ((u64)addr + buffersize > (1ULL << 32))
 444			goto address_error;
 445		break;
 446	}
 447
 448	/* The address is OK. */
 449	return 0;
 450
 451address_error:
 452	/* We can't support this address. Unmap it again. */
 453	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
 454
 455	return 1;
 456}
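    /*
     * Worked example (illustrative): on a B43legacy_DMA_30BIT engine a
     * buffer mapped at 0x3FFFFF00 with buffersize 512 would end at
     * 0x40000100, which is beyond 1ULL << 30; the mapping is therefore
     * undone and rejected here, while the very same buffer passes on a
     * 32-bit engine.
     */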
 457
 458static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 459			       struct b43legacy_dmadesc32 *desc,
 460			       struct b43legacy_dmadesc_meta *meta,
 461			       gfp_t gfp_flags)
 462{
 463	struct b43legacy_rxhdr_fw3 *rxhdr;
 464	struct b43legacy_hwtxstatus *txstat;
 465	dma_addr_t dmaaddr;
 466	struct sk_buff *skb;
 467
 468	B43legacy_WARN_ON(ring->tx);
 469
 470	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 471	if (unlikely(!skb))
 472		return -ENOMEM;
 473	dmaaddr = map_descbuffer(ring, skb->data,
 474				 ring->rx_buffersize, 0);
 475	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 476		/* ugh. try to realloc in zone_dma */
 477		gfp_flags |= GFP_DMA;
 478
 479		dev_kfree_skb_any(skb);
 480
 481		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 482		if (unlikely(!skb))
 483			return -ENOMEM;
 484		dmaaddr = map_descbuffer(ring, skb->data,
 485					 ring->rx_buffersize, 0);
 486	}
 487
 488	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 489		dev_kfree_skb_any(skb);
 490		return -EIO;
 491	}
 492
 493	meta->skb = skb;
 494	meta->dmaaddr = dmaaddr;
 495	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
 496
 497	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
 498	rxhdr->frame_len = 0;
 499	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
 500	txstat->cookie = 0;
 501
 502	return 0;
 503}
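    /*
     * Note (sketch): frame_len and cookie are deliberately zeroed above.
     * dma_rx() later spin-waits on exactly these fields, so 0 acts as the
     * "device has not written this buffer yet" marker, both for the RX
     * header and for the HW TX-status case on ring 3.
     */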
 504
 505/* Allocate the initial descbuffers.
 506 * This is used for an RX ring only.
 507 */
 508static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
 509{
 510	int i;
 511	int err = -ENOMEM;
 512	struct b43legacy_dmadesc32 *desc;
 513	struct b43legacy_dmadesc_meta *meta;
 514
 515	for (i = 0; i < ring->nr_slots; i++) {
 516		desc = op32_idx2desc(ring, i, &meta);
 517
 518		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
 519		if (err) {
 520			b43legacyerr(ring->dev->wl,
 521			       "Failed to allocate initial descbuffers\n");
 522			goto err_unwind;
 523		}
 524	}
 525	mb(); /* all descbuffer setup before next line */
 526	ring->used_slots = ring->nr_slots;
 527	err = 0;
 528out:
 529	return err;
 530
 531err_unwind:
 532	for (i--; i >= 0; i--) {
 533		desc = op32_idx2desc(ring, i, &meta);
 534
 535		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
 536		dev_kfree_skb(meta->skb);
 537	}
 538	goto out;
 539}
 540
 541/* Do initial setup of the DMA controller.
 542 * Reset the controller, write the ring busaddress
 543 * and switch the "enable" bit on.
 544 */
 545static int dmacontroller_setup(struct b43legacy_dmaring *ring)
 546{
 547	int err = 0;
 548	u32 value;
 549	u32 addrext;
 550	u32 trans = ring->dev->dma.translation;
 551	u32 ringbase = (u32)(ring->dmabase);
 552
 553	if (ring->tx) {
 554		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
 555			  >> SSB_DMA_TRANSLATION_SHIFT;
 556		value = B43legacy_DMA32_TXENABLE;
 557		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
 558			& B43legacy_DMA32_TXADDREXT_MASK;
 559		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
 560		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
 561				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
 562				    | trans);
 563	} else {
 564		err = alloc_initial_descbuffers(ring);
 565		if (err)
 566			goto out;
 567
 568		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
 569			  >> SSB_DMA_TRANSLATION_SHIFT;
 570		value = (ring->frameoffset <<
 571			 B43legacy_DMA32_RXFROFF_SHIFT);
 572		value |= B43legacy_DMA32_RXENABLE;
 573		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
 574			 & B43legacy_DMA32_RXADDREXT_MASK;
 575		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
 576		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
 577				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
 578				    | trans);
 579		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
 580	}
 581
 582out:
 583	return err;
 584}
 585
 586/* Shutdown the DMA controller. */
 587static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
 588{
 589	if (ring->tx) {
 590		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
 591						 ring->type);
 592		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
 593	} else {
 594		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
 595						 ring->type);
 596		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
 597	}
 598}
 599
 600static void free_all_descbuffers(struct b43legacy_dmaring *ring)
 601{
 602	struct b43legacy_dmadesc_meta *meta;
 603	int i;
 604
 605	if (!ring->used_slots)
 606		return;
 607	for (i = 0; i < ring->nr_slots; i++) {
 608		op32_idx2desc(ring, i, &meta);
 609
 610		if (!meta->skb) {
 611			B43legacy_WARN_ON(!ring->tx);
 612			continue;
 613		}
 614		if (ring->tx)
 615			unmap_descbuffer(ring, meta->dmaaddr,
 616					 meta->skb->len, 1);
 617		else
 618			unmap_descbuffer(ring, meta->dmaaddr,
 619					 ring->rx_buffersize, 0);
 620		free_descriptor_buffer(ring, meta, 0);
 621	}
 622}
 623
 624static u64 supported_dma_mask(struct b43legacy_wldev *dev)
 625{
 626	u32 tmp;
 627	u16 mmio_base;
 628
 629	mmio_base = b43legacy_dmacontroller_base(0, 0);
 630	b43legacy_write32(dev,
 631			mmio_base + B43legacy_DMA32_TXCTL,
 632			B43legacy_DMA32_TXADDREXT_MASK);
 633	tmp = b43legacy_read32(dev, mmio_base +
 634			       B43legacy_DMA32_TXCTL);
 635	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
 636		return DMA_BIT_MASK(32);
 637
 638	return DMA_BIT_MASK(30);
 639}
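    /*
     * How the probe above works (sketch): the ADDREXT mask bits are written
     * to TXCTL and read back. An engine with 32-bit addressing latches
     * them, so the readback is non-zero; a 30-bit engine keeps them
     * hard-wired to zero and the function falls through to
     * DMA_BIT_MASK(30).
     */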
 640
 641static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
 642{
 643	if (dmamask == DMA_BIT_MASK(30))
 644		return B43legacy_DMA_30BIT;
 645	if (dmamask == DMA_BIT_MASK(32))
 646		return B43legacy_DMA_32BIT;
 647	B43legacy_WARN_ON(1);
 648	return B43legacy_DMA_30BIT;
 649}
 650
 651/* Main initialization function. */
 652static
 653struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 654						  int controller_index,
 655						  int for_tx,
 656						  enum b43legacy_dmatype type)
 657{
 658	struct b43legacy_dmaring *ring;
 659	int err;
 660	int nr_slots;
 661	dma_addr_t dma_test;
 662
 663	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 664	if (!ring)
 665		goto out;
 666	ring->type = type;
 667	ring->dev = dev;
 668
 669	nr_slots = B43legacy_RXRING_SLOTS;
 670	if (for_tx)
 671		nr_slots = B43legacy_TXRING_SLOTS;
 672
 673	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
 674			     GFP_KERNEL);
 675	if (!ring->meta)
 676		goto err_kfree_ring;
 677	if (for_tx) {
 678		ring->txhdr_cache = kcalloc(nr_slots,
 679					sizeof(struct b43legacy_txhdr_fw3),
 680					GFP_KERNEL);
 681		if (!ring->txhdr_cache)
 682			goto err_kfree_meta;
 683
 684		/* test for ability to dma to txhdr_cache */
 685		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
 686					      sizeof(struct b43legacy_txhdr_fw3),
 687					      DMA_TO_DEVICE);
 688
 689		if (b43legacy_dma_mapping_error(ring, dma_test,
 690					sizeof(struct b43legacy_txhdr_fw3), 1)) {
 691			/* ugh realloc */
 692			kfree(ring->txhdr_cache);
 693			ring->txhdr_cache = kcalloc(nr_slots,
 694					sizeof(struct b43legacy_txhdr_fw3),
 695					GFP_KERNEL | GFP_DMA);
 696			if (!ring->txhdr_cache)
 697				goto err_kfree_meta;
 698
 699			dma_test = dma_map_single(dev->dev->dma_dev,
 700					ring->txhdr_cache,
 701					sizeof(struct b43legacy_txhdr_fw3),
 702					DMA_TO_DEVICE);
 703
 704			if (b43legacy_dma_mapping_error(ring, dma_test,
 705					sizeof(struct b43legacy_txhdr_fw3), 1))
 706				goto err_kfree_txhdr_cache;
 707		}
 708
 709		dma_unmap_single(dev->dev->dma_dev, dma_test,
 710				 sizeof(struct b43legacy_txhdr_fw3),
 711				 DMA_TO_DEVICE);
 712	}
 713
 714	ring->nr_slots = nr_slots;
 715	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
 716	ring->index = controller_index;
 717	if (for_tx) {
 718		ring->tx = true;
 719		ring->current_slot = -1;
 720	} else {
 721		if (ring->index == 0) {
 722			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
 723			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
 724		} else if (ring->index == 3) {
 725			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
 726			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
 727		} else
 728			B43legacy_WARN_ON(1);
 729	}
 730#ifdef CONFIG_B43LEGACY_DEBUG
 731	ring->last_injected_overflow = jiffies;
 732#endif
 733
 734	err = alloc_ringmemory(ring);
 735	if (err)
 736		goto err_kfree_txhdr_cache;
 737	err = dmacontroller_setup(ring);
 738	if (err)
 739		goto err_free_ringmemory;
 740
 741out:
 742	return ring;
 743
 744err_free_ringmemory:
 745	free_ringmemory(ring);
 746err_kfree_txhdr_cache:
 747	kfree(ring->txhdr_cache);
 748err_kfree_meta:
 749	kfree(ring->meta);
 750err_kfree_ring:
 751	kfree(ring);
 752	ring = NULL;
 753	goto out;
 754}
 755
 756/* Main cleanup function. */
 757static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
 758{
 759	if (!ring)
 760		return;
 761
 762	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
 763		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
 764		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
 765		     ring->nr_slots);
 766	/* Device IRQs are disabled prior to entering this function,
 767	 * so no need to take care of concurrency with rx handler stuff.
 768	 */
 769	dmacontroller_cleanup(ring);
 770	free_all_descbuffers(ring);
 771	free_ringmemory(ring);
 772
 773	kfree(ring->txhdr_cache);
 774	kfree(ring->meta);
 775	kfree(ring);
 776}
 777
 778void b43legacy_dma_free(struct b43legacy_wldev *dev)
 779{
 780	struct b43legacy_dma *dma;
 781
 782	if (b43legacy_using_pio(dev))
 783		return;
 784	dma = &dev->dma;
 785
 786	b43legacy_destroy_dmaring(dma->rx_ring3);
 787	dma->rx_ring3 = NULL;
 788	b43legacy_destroy_dmaring(dma->rx_ring0);
 789	dma->rx_ring0 = NULL;
 790
 791	b43legacy_destroy_dmaring(dma->tx_ring5);
 792	dma->tx_ring5 = NULL;
 793	b43legacy_destroy_dmaring(dma->tx_ring4);
 794	dma->tx_ring4 = NULL;
 795	b43legacy_destroy_dmaring(dma->tx_ring3);
 796	dma->tx_ring3 = NULL;
 797	b43legacy_destroy_dmaring(dma->tx_ring2);
 798	dma->tx_ring2 = NULL;
 799	b43legacy_destroy_dmaring(dma->tx_ring1);
 800	dma->tx_ring1 = NULL;
 801	b43legacy_destroy_dmaring(dma->tx_ring0);
 802	dma->tx_ring0 = NULL;
 803}
 804
 805static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
 806{
 807	u64 orig_mask = mask;
 808	bool fallback = false;
 809	int err;
 810
 811	/* Try to set the DMA mask. If it fails, try falling back to a
 812	 * lower mask, as we can always also support a lower one. */
 813	while (1) {
 814		err = dma_set_mask(dev->dev->dma_dev, mask);
 815		if (!err) {
 816			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
 817			if (!err)
 818				break;
 819		}
 820		if (mask == DMA_BIT_MASK(64)) {
 821			mask = DMA_BIT_MASK(32);
 822			fallback = true;
 823			continue;
 824		}
 825		if (mask == DMA_BIT_MASK(32)) {
 826			mask = DMA_BIT_MASK(30);
 827			fallback = true;
 828			continue;
 829		}
 830		b43legacyerr(dev->wl, "The machine/kernel does not support "
 831		       "the required %u-bit DMA mask\n",
 832		       (unsigned int)dma_mask_to_engine_type(orig_mask));
 833		return -EOPNOTSUPP;
 834	}
 835	if (fallback) {
 836		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
 837			"bit\n",
 838			(unsigned int)dma_mask_to_engine_type(orig_mask),
 839			(unsigned int)dma_mask_to_engine_type(mask));
 840	}
 841
 842	return 0;
 843}
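    /*
     * Example run (illustrative): if the platform rejects a 32-bit
     * streaming mask, the loop above retries with DMA_BIT_MASK(30) and
     * sets fallback = true; only the 64 -> 32 -> 30 steps are attempted,
     * and anything below 30 bits fails hard with -EOPNOTSUPP.
     */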
 844
 845int b43legacy_dma_init(struct b43legacy_wldev *dev)
 846{
 847	struct b43legacy_dma *dma = &dev->dma;
 848	struct b43legacy_dmaring *ring;
 849	int err;
 850	u64 dmamask;
 851	enum b43legacy_dmatype type;
 852
 853	dmamask = supported_dma_mask(dev);
 854	type = dma_mask_to_engine_type(dmamask);
 855	err = b43legacy_dma_set_mask(dev, dmamask);
 856	if (err) {
 857#ifdef CONFIG_B43LEGACY_PIO
 858		b43legacywarn(dev->wl, "DMA for this device not supported. "
 859			"Falling back to PIO\n");
 860		dev->__using_pio = true;
 861		return -EAGAIN;
 862#else
 863		b43legacyerr(dev->wl, "DMA for this device not supported and "
 864		       "no PIO support compiled in\n");
 865		return -EOPNOTSUPP;
 866#endif
 867	}
 868	dma->translation = ssb_dma_translation(dev->dev);
 869
 870	err = -ENOMEM;
 871	/* setup TX DMA channels. */
 872	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
 873	if (!ring)
 874		goto out;
 875	dma->tx_ring0 = ring;
 876
 877	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
 878	if (!ring)
 879		goto err_destroy_tx0;
 880	dma->tx_ring1 = ring;
 881
 882	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
 883	if (!ring)
 884		goto err_destroy_tx1;
 885	dma->tx_ring2 = ring;
 886
 887	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
 888	if (!ring)
 889		goto err_destroy_tx2;
 890	dma->tx_ring3 = ring;
 891
 892	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
 893	if (!ring)
 894		goto err_destroy_tx3;
 895	dma->tx_ring4 = ring;
 896
 897	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
 898	if (!ring)
 899		goto err_destroy_tx4;
 900	dma->tx_ring5 = ring;
 901
 902	/* setup RX DMA channels. */
 903	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
 904	if (!ring)
 905		goto err_destroy_tx5;
 906	dma->rx_ring0 = ring;
 907
 908	if (dev->dev->id.revision < 5) {
 909		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
 910		if (!ring)
 911			goto err_destroy_rx0;
 912		dma->rx_ring3 = ring;
 913	}
 914
 915	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
 916	err = 0;
 917out:
 918	return err;
 919
 920err_destroy_rx0:
 921	b43legacy_destroy_dmaring(dma->rx_ring0);
 922	dma->rx_ring0 = NULL;
 923err_destroy_tx5:
 924	b43legacy_destroy_dmaring(dma->tx_ring5);
 925	dma->tx_ring5 = NULL;
 926err_destroy_tx4:
 927	b43legacy_destroy_dmaring(dma->tx_ring4);
 928	dma->tx_ring4 = NULL;
 929err_destroy_tx3:
 930	b43legacy_destroy_dmaring(dma->tx_ring3);
 931	dma->tx_ring3 = NULL;
 932err_destroy_tx2:
 933	b43legacy_destroy_dmaring(dma->tx_ring2);
 934	dma->tx_ring2 = NULL;
 935err_destroy_tx1:
 936	b43legacy_destroy_dmaring(dma->tx_ring1);
 937	dma->tx_ring1 = NULL;
 938err_destroy_tx0:
 939	b43legacy_destroy_dmaring(dma->tx_ring0);
 940	dma->tx_ring0 = NULL;
 941	goto out;
 942}
 943
 944/* Generate a cookie for the TX header. */
 945static u16 generate_cookie(struct b43legacy_dmaring *ring,
 946			   int slot)
 947{
 948	u16 cookie = 0x1000;
 949
 950	/* Use the upper 4 bits of the cookie as
 951	 * DMA controller ID and store the slot number
 952	 * in the lower 12 bits.
 953	 * Note that the cookie must never be 0, as this
 954	 * is a special value used in the RX path.
 955	 */
 956	switch (ring->index) {
 957	case 0:
 958		cookie = 0xA000;
 959		break;
 960	case 1:
 961		cookie = 0xB000;
 962		break;
 963	case 2:
 964		cookie = 0xC000;
 965		break;
 966	case 3:
 967		cookie = 0xD000;
 968		break;
 969	case 4:
 970		cookie = 0xE000;
 971		break;
 972	case 5:
 973		cookie = 0xF000;
 974		break;
 975	}
 976	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
 977	cookie |= (u16)slot;
 978
 979	return cookie;
 980}
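    /*
     * Worked example (illustrative): ring index 2, slot 5 gives
     * 0xC000 | 0x0005 == 0xC005. parse_cookie() below inverts this with
     * (cookie & 0xF000) to pick tx_ring2 and (cookie & 0x0FFF) to recover
     * slot 5; no cookie can be 0x0000 since every ring ID is >= 0xA000.
     */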
 981
 982/* Inspect a cookie and find out to which controller/slot it belongs. */
 983static
 984struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
 985				      u16 cookie, int *slot)
 986{
 987	struct b43legacy_dma *dma = &dev->dma;
 988	struct b43legacy_dmaring *ring = NULL;
 989
 990	switch (cookie & 0xF000) {
 991	case 0xA000:
 992		ring = dma->tx_ring0;
 993		break;
 994	case 0xB000:
 995		ring = dma->tx_ring1;
 996		break;
 997	case 0xC000:
 998		ring = dma->tx_ring2;
 999		break;
1000	case 0xD000:
1001		ring = dma->tx_ring3;
1002		break;
1003	case 0xE000:
1004		ring = dma->tx_ring4;
1005		break;
1006	case 0xF000:
1007		ring = dma->tx_ring5;
1008		break;
1009	default:
1010		B43legacy_WARN_ON(1);
1011	}
1012	*slot = (cookie & 0x0FFF);
1013	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
1014
1015	return ring;
1016}
1017
1018static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1019			    struct sk_buff **in_skb)
1020{
1021	struct sk_buff *skb = *in_skb;
1022	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1023	u8 *header;
1024	int slot, old_top_slot, old_used_slots;
1025	int err;
1026	struct b43legacy_dmadesc32 *desc;
1027	struct b43legacy_dmadesc_meta *meta;
1028	struct b43legacy_dmadesc_meta *meta_hdr;
1029	struct sk_buff *bounce_skb;
1030
1031#define SLOTS_PER_PACKET  2
1032	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
1033
1034	old_top_slot = ring->current_slot;
1035	old_used_slots = ring->used_slots;
1036
1037	/* Get a slot for the header. */
1038	slot = request_slot(ring);
1039	desc = op32_idx2desc(ring, slot, &meta_hdr);
1040	memset(meta_hdr, 0, sizeof(*meta_hdr));
1041
1042	header = &(ring->txhdr_cache[slot * sizeof(
1043			       struct b43legacy_txhdr_fw3)]);
1044	err = b43legacy_generate_txhdr(ring->dev, header,
1045				 skb->data, skb->len, info,
1046				 generate_cookie(ring, slot));
1047	if (unlikely(err)) {
1048		ring->current_slot = old_top_slot;
1049		ring->used_slots = old_used_slots;
1050		return err;
1051	}
1052
1053	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1054					   sizeof(struct b43legacy_txhdr_fw3), 1);
1055	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
1056					sizeof(struct b43legacy_txhdr_fw3), 1)) {
1057		ring->current_slot = old_top_slot;
1058		ring->used_slots = old_used_slots;
1059		return -EIO;
1060	}
1061	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1062			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
1063
1064	/* Get a slot for the payload. */
1065	slot = request_slot(ring);
1066	desc = op32_idx2desc(ring, slot, &meta);
1067	memset(meta, 0, sizeof(*meta));
1068
1069	meta->skb = skb;
1070	meta->is_last_fragment = true;
1071
1072	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1073	/* create a bounce buffer in zone_dma on mapping failure. */
1074	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1075		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1076		if (!bounce_skb) {
1077			ring->current_slot = old_top_slot;
1078			ring->used_slots = old_used_slots;
1079			err = -ENOMEM;
1080			goto out_unmap_hdr;
1081		}
1082
1083		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1084		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1085		bounce_skb->dev = skb->dev;
1086		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1087		info = IEEE80211_SKB_CB(bounce_skb);
1088
1089		dev_kfree_skb_any(skb);
1090		skb = bounce_skb;
1091		*in_skb = bounce_skb;
1092		meta->skb = skb;
1093		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1094		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1095			ring->current_slot = old_top_slot;
1096			ring->used_slots = old_used_slots;
1097			err = -EIO;
1098			goto out_free_bounce;
1099		}
1100	}
1101
1102	op32_fill_descriptor(ring, desc, meta->dmaaddr,
1103			     skb->len, 0, 1, 1);
1104
1105	wmb();	/* previous stuff MUST be done */
1106	/* Now transfer the whole frame. */
1107	op32_poke_tx(ring, next_slot(ring, slot));
1108	return 0;
1109
1110out_free_bounce:
1111	dev_kfree_skb_any(skb);
1112out_unmap_hdr:
1113	unmap_descbuffer(ring, meta_hdr->dmaaddr,
1114			 sizeof(struct b43legacy_txhdr_fw3), 1);
1115	return err;
1116}
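    /*
     * Resulting descriptor layout (sketch): each frame consumes
     * SLOTS_PER_PACKET == 2 consecutive slots. The first carries the
     * b43legacy_txhdr_fw3 header with FRAMESTART set, the second the skb
     * payload with FRAMEEND and IRQ set, so exactly one TX interrupt fires
     * per frame; op32_poke_tx() then moves TXINDEX past the payload slot.
     */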
1117
1118static inline
1119int should_inject_overflow(struct b43legacy_dmaring *ring)
1120{
1121#ifdef CONFIG_B43LEGACY_DEBUG
1122	if (unlikely(b43legacy_debug(ring->dev,
1123				     B43legacy_DBG_DMAOVERFLOW))) {
1124		/* Check if we should inject another ringbuffer overflow
1125		 * to test handling of this situation in the stack. */
1126		unsigned long next_overflow;
1127
1128		next_overflow = ring->last_injected_overflow + HZ;
1129		if (time_after(jiffies, next_overflow)) {
1130			ring->last_injected_overflow = jiffies;
1131			b43legacydbg(ring->dev->wl,
1132			       "Injecting TX ring overflow on "
1133			       "DMA controller %d\n", ring->index);
1134			return 1;
1135		}
1136	}
1137#endif /* CONFIG_B43LEGACY_DEBUG */
1138	return 0;
1139}
1140
1141int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1142		     struct sk_buff *skb)
1143{
1144	struct b43legacy_dmaring *ring;
1145	int err = 0;
1146
1147	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1148	B43legacy_WARN_ON(!ring->tx);
1149
1150	if (unlikely(ring->stopped)) {
1151		/* We get here only because of a bug in mac80211.
1152		 * Because of a race, one packet may be queued after
1153		 * the queue is stopped, thus we got called when we shouldn't.
1154		 * For now, just refuse the transmit. */
1155		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1156			b43legacyerr(dev->wl, "Packet after queue stopped\n");
1157		return -ENOSPC;
1158	}
1159
1160	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
1161		/* If we get here, we have a real error with the queue
1162		 * full, but queues not stopped. */
1163		b43legacyerr(dev->wl, "DMA queue overflow\n");
1164		return -ENOSPC;
1165	}
1166
1167	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
1168	 * into the skb data or cb now. */
1169	err = dma_tx_fragment(ring, &skb);
1170	if (unlikely(err == -ENOKEY)) {
1171		/* Drop this packet, as we don't have the encryption key
1172		 * anymore and must not transmit it unencrypted. */
1173		dev_kfree_skb_any(skb);
1174		return 0;
1175	}
1176	if (unlikely(err)) {
1177		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
1178		return err;
1179	}
1180	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1181	    should_inject_overflow(ring)) {
1182		/* This TX ring is full. */
1183		unsigned int skb_mapping = skb_get_queue_mapping(skb);
1184		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
1185		dev->wl->tx_queue_stopped[skb_mapping] = 1;
1186		ring->stopped = true;
1187		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1188			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
1189			       ring->index);
1190	}
1191	return err;
1192}
1193
1194void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1195				 const struct b43legacy_txstatus *status)
1196{
1197	struct b43legacy_dmaring *ring;
1198	struct b43legacy_dmadesc_meta *meta;
1199	int retry_limit;
1200	int slot;
1201	int firstused;
1202
1203	ring = parse_cookie(dev, status->cookie, &slot);
1204	if (unlikely(!ring))
1205		return;
1206	B43legacy_WARN_ON(!ring->tx);
1207
1208	/* Sanity check: TX packets are processed in-order on one ring.
1209	 * Check if the slot deduced from the cookie really is the first
1210	 * used slot. */
1211	firstused = ring->current_slot - ring->used_slots + 1;
1212	if (firstused < 0)
1213		firstused = ring->nr_slots + firstused;
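    	/* Worked example (illustrative): with nr_slots == 128,
    	 * current_slot == 3 and used_slots == 6, the oldest pending slot
    	 * is 3 - 6 + 1 == -2, which wraps to 128 + (-2) == 126. */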
1214	if (unlikely(slot != firstused)) {
1215		/* This possibly is a firmware bug and will result in
1216		 * malfunction, memory leaks and/or stall of DMA functionality.
1217		 */
1218		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
1219			     "ring %d. Expected %d, but got %d\n",
1220			     ring->index, firstused, slot);
1221		return;
1222	}
1223
1224	while (1) {
1225		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1226		op32_idx2desc(ring, slot, &meta);
1227
1228		if (meta->skb)
1229			unmap_descbuffer(ring, meta->dmaaddr,
1230					 meta->skb->len, 1);
1231		else
1232			unmap_descbuffer(ring, meta->dmaaddr,
1233					 sizeof(struct b43legacy_txhdr_fw3),
1234					 1);
1235
1236		if (meta->is_last_fragment) {
1237			struct ieee80211_tx_info *info;
1238			BUG_ON(!meta->skb);
1239			info = IEEE80211_SKB_CB(meta->skb);
1240
1241			/* preserve the configured retry limit before clearing the status.
1242			 * The xmit function has overwritten the rc's value with the actual
1243			 * retry limit done by the hardware. */
1244			retry_limit = info->status.rates[0].count;
1245			ieee80211_tx_info_clear_status(info);
1246
1247			if (status->acked)
1248				info->flags |= IEEE80211_TX_STAT_ACK;
1249
1250			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
1251				/*
1252				 * If the short retries (RTS, not data frame) have exceeded
1253				 * the limit, the hw will not have tried the selected rate,
1254				 * but will have used the fallback rate instead.
1255				 * Don't let the rate control count attempts for the selected
1256				 * rate in this case, otherwise the statistics will be off.
1257				 */
1258				info->status.rates[0].count = 0;
1259				info->status.rates[1].count = status->frame_count;
1260			} else {
1261				if (status->frame_count > retry_limit) {
1262					info->status.rates[0].count = retry_limit;
1263					info->status.rates[1].count = status->frame_count -
1264							retry_limit;
1265
1266				} else {
1267					info->status.rates[0].count = status->frame_count;
1268					info->status.rates[1].idx = -1;
1269				}
1270			}
1271
1272			/* Call back to inform the ieee80211 subsystem about the
1273			 * status of the transmission.
1274			 * Some fields of txstat are already filled in dma_tx().
1275			 */
1276			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1277			/* skb is freed by ieee80211_tx_status_irqsafe() */
1278			meta->skb = NULL;
1279		} else {
1280			/* No need to call free_descriptor_buffer here, as
1281			 * this is only the txhdr, which is not allocated.
1282			 */
1283			B43legacy_WARN_ON(meta->skb != NULL);
1284		}
1285
1286		/* Everything unmapped and free'd. So it's not used anymore. */
1287		ring->used_slots--;
1288
1289		if (meta->is_last_fragment)
1290			break;
1291		slot = next_slot(ring, slot);
1292	}
1293	dev->stats.last_tx = jiffies;
1294	if (ring->stopped) {
1295		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1296		ring->stopped = false;
1297	}
1298
1299	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1300		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
1301	} else {
1302		/* If the driver queue is running, wake the corresponding
1303		 * mac80211 queue. */
1304		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1305		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1306			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1307				     ring->index);
1308	}
1309	/* Add work to the queue. */
1310	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
1311}
1312
1313static void dma_rx(struct b43legacy_dmaring *ring,
1314		   int *slot)
1315{
1316	struct b43legacy_dmadesc32 *desc;
1317	struct b43legacy_dmadesc_meta *meta;
1318	struct b43legacy_rxhdr_fw3 *rxhdr;
1319	struct sk_buff *skb;
1320	u16 len;
1321	int err;
1322	dma_addr_t dmaaddr;
1323
1324	desc = op32_idx2desc(ring, *slot, &meta);
1325
1326	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1327	skb = meta->skb;
1328
1329	if (ring->index == 3) {
1330		/* We received an xmit status. */
1331		struct b43legacy_hwtxstatus *hw =
1332				(struct b43legacy_hwtxstatus *)skb->data;
1333		int i = 0;
1334
1335		while (hw->cookie == 0) {
1336			if (i > 100)
1337				break;
1338			i++;
1339			udelay(2);
1340			barrier();
1341		}
1342		b43legacy_handle_hwtxstatus(ring->dev, hw);
1343		/* recycle the descriptor buffer. */
1344		sync_descbuffer_for_device(ring, meta->dmaaddr,
1345					   ring->rx_buffersize);
1346
1347		return;
1348	}
1349	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
1350	len = le16_to_cpu(rxhdr->frame_len);
1351	if (len == 0) {
1352		int i = 0;
1353
1354		do {
1355			udelay(2);
1356			barrier();
1357			len = le16_to_cpu(rxhdr->frame_len);
1358		} while (len == 0 && i++ < 5);
1359		if (unlikely(len == 0)) {
1360			/* recycle the descriptor buffer. */
1361			sync_descbuffer_for_device(ring, meta->dmaaddr,
1362						   ring->rx_buffersize);
1363			goto drop;
1364		}
1365	}
1366	if (unlikely(len > ring->rx_buffersize)) {
1367		/* The data did not fit into one descriptor buffer
1368		 * and is split over multiple buffers.
1369		 * This should never happen, as we try to allocate buffers
1370		 * big enough. So simply ignore this packet.
1371		 */
1372		int cnt = 0;
1373		s32 tmp = len;
1374
1375		while (1) {
1376			desc = op32_idx2desc(ring, *slot, &meta);
1377			/* recycle the descriptor buffer. */
1378			sync_descbuffer_for_device(ring, meta->dmaaddr,
1379						   ring->rx_buffersize);
1380			*slot = next_slot(ring, *slot);
1381			cnt++;
1382			tmp -= ring->rx_buffersize;
1383			if (tmp <= 0)
1384				break;
1385		}
1386		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1387		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
1388		       len, ring->rx_buffersize, cnt);
1389		goto drop;
1390	}
1391
1392	dmaaddr = meta->dmaaddr;
1393	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1394	if (unlikely(err)) {
1395		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1396			     " failed\n");
1397		sync_descbuffer_for_device(ring, dmaaddr,
1398					   ring->rx_buffersize);
1399		goto drop;
1400	}
1401
1402	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1403	skb_put(skb, len + ring->frameoffset);
1404	skb_pull(skb, ring->frameoffset);
1405
1406	b43legacy_rx(ring->dev, skb, rxhdr);
1407drop:
1408	return;
1409}
1410
1411void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1412{
1413	int slot;
1414	int current_slot;
1415	int used_slots = 0;
1416
1417	B43legacy_WARN_ON(ring->tx);
1418	current_slot = op32_get_current_rxslot(ring);
1419	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
1420			   ring->nr_slots));
1421
1422	slot = ring->current_slot;
1423	for (; slot != current_slot; slot = next_slot(ring, slot)) {
1424		dma_rx(ring, &slot);
1425		update_max_used_slots(ring, ++used_slots);
1426	}
1427	op32_set_current_rxslot(ring, slot);
1428	ring->current_slot = slot;
1429}
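    /*
     * Example pass (illustrative): if ring->current_slot is 10 and the
     * hardware reports slot 13, dma_rx() handles slots 10..12 and the RX
     * index register is rewritten at slot 13, returning those descriptors
     * to the device.
     */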
1430
1431static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
1432{
1433	B43legacy_WARN_ON(!ring->tx);
1434	op32_tx_suspend(ring);
1435}
1436
1437static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
1438{
1439	B43legacy_WARN_ON(!ring->tx);
1440	op32_tx_resume(ring);
1441}
1442
1443void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
1444{
1445	b43legacy_power_saving_ctl_bits(dev, -1, 1);
1446	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
1447	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
1448	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
1449	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
1450	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
1451	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
1452}
1453
1454void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
1455{
1456	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
1457	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
1458	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
1459	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
1460	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
1461	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
1462	b43legacy_power_saving_ctl_bits(dev, -1, -1);
1463}