Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--    drivers/net/wireless/b43/dma.c    570
1 file changed, 302 insertions, 268 deletions
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be275e03..1d7982afc0a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -4,7 +4,7 @@
 
   DMA ringbuffer and descriptor allocation/management
 
-  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
 
   Some code in this file is derived from the b44.c driver
   Copyright (C) 2002 David S. Miller
@@ -38,6 +38,7 @@
 #include <linux/delay.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
+#include <linux/slab.h>
 #include <asm/div64.h>
 
@@ -46,6 +47,38 @@
  * into separate slots. */
 #define TX_SLOTS_PER_FRAME 2
 
+static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
+                           enum b43_addrtype addrtype)
+{
+        u32 uninitialized_var(addr);
+
+        switch (addrtype) {
+        case B43_DMA_ADDR_LOW:
+                addr = lower_32_bits(dmaaddr);
+                if (dma->translation_in_low) {
+                        addr &= ~SSB_DMA_TRANSLATION_MASK;
+                        addr |= dma->translation;
+                }
+                break;
+        case B43_DMA_ADDR_HIGH:
+                addr = upper_32_bits(dmaaddr);
+                if (!dma->translation_in_low) {
+                        addr &= ~SSB_DMA_TRANSLATION_MASK;
+                        addr |= dma->translation;
+                }
+                break;
+        case B43_DMA_ADDR_EXT:
+                if (dma->translation_in_low)
+                        addr = lower_32_bits(dmaaddr);
+                else
+                        addr = upper_32_bits(dmaaddr);
+                addr &= SSB_DMA_TRANSLATION_MASK;
+                addr >>= SSB_DMA_TRANSLATION_SHIFT;
+                break;
+        }
+
+        return addr;
+}
+
 /* 32bit DMA ops. */
 static
@@ -76,10 +109,9 @@ static void op32_fill_descriptor(struct b43_dmaring *ring,
         slot = (int)(&(desc->dma32) - descbase);
         B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
 
-        addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
-        addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
-            >> SSB_DMA_TRANSLATION_SHIFT;
-        addr |= ssb_dma_translation(ring->dev->dev);
+        addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
+        addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
+
         ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
         if (slot == ring->nr_slots - 1)
                 ctl |= B43_DMA32_DCTL_DTABLEEND;
@@ -169,11 +201,10 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
         slot = (int)(&(desc->dma64) - descbase);
         B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
 
-        addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
-        addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
-        addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
-            >> SSB_DMA_TRANSLATION_SHIFT;
-        addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
+        addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
+        addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
+        addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
+
         if (slot == ring->nr_slots - 1)
                 ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
         if (start)
@@ -332,11 +363,11 @@ static inline
         dma_addr_t dmaaddr;
 
         if (tx) {
-                dmaaddr = ssb_dma_map_single(ring->dev->dev,
-                                             buf, len, DMA_TO_DEVICE);
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+                                         buf, len, DMA_TO_DEVICE);
         } else {
-                dmaaddr = ssb_dma_map_single(ring->dev->dev,
-                                             buf, len, DMA_FROM_DEVICE);
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+                                         buf, len, DMA_FROM_DEVICE);
         }
 
         return dmaaddr;
@@ -347,11 +378,11 @@ static inline
                    dma_addr_t addr, size_t len, int tx)
 {
         if (tx) {
-                ssb_dma_unmap_single(ring->dev->dev,
-                                     addr, len, DMA_TO_DEVICE);
+                dma_unmap_single(ring->dev->dev->dma_dev,
+                                 addr, len, DMA_TO_DEVICE);
         } else {
-                ssb_dma_unmap_single(ring->dev->dev,
-                                     addr, len, DMA_FROM_DEVICE);
+                dma_unmap_single(ring->dev->dev->dma_dev,
+                                 addr, len, DMA_FROM_DEVICE);
         }
 }
@@ -360,7 +391,7 @@ static inline
                           dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        ssb_dma_sync_single_for_cpu(ring->dev->dev,
+        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
                                     addr, len, DMA_FROM_DEVICE);
 }
@@ -369,8 +400,8 @@ static inline
                              dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        ssb_dma_sync_single_for_device(ring->dev->dev,
-                                       addr, len, DMA_FROM_DEVICE);
+        dma_sync_single_for_device(ring->dev->dev->dma_dev,
+                                   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -378,165 +409,43 @@ static inline
                       struct b43_dmadesc_meta *meta)
 {
         if (meta->skb) {
-                dev_kfree_skb_any(meta->skb);
+                if (ring->tx)
+                        ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
+                else
+                        dev_kfree_skb_any(meta->skb);
                 meta->skb = NULL;
         }
 }
 
-/* Check if a DMA region fits the device constraints.
- * Returns true, if the region is OK for usage with this device. */
-static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
-                                      dma_addr_t addr, size_t size)
-{
-        switch (ring->type) {
-        case B43_DMA_30BIT:
-                if ((u64)addr + size > (1ULL << 30))
-                        return 0;
-                break;
-        case B43_DMA_32BIT:
-                if ((u64)addr + size > (1ULL << 32))
-                        return 0;
-                break;
-        case B43_DMA_64BIT:
-                /* Currently we can't have addresses beyond
-                 * 64bit in the kernel. */
-                break;
-        }
-        return 1;
-}
-
-#define is_4k_aligned(addr)        (((u64)(addr) & 0x0FFFull) == 0)
-#define is_8k_aligned(addr)        (((u64)(addr) & 0x1FFFull) == 0)
-
-static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
-                                       dma_addr_t dmaaddr, size_t size)
-{
-        ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
-        free_pages((unsigned long)base, get_order(size));
-}
-
-static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
-                                        dma_addr_t *dmaaddr, size_t size,
-                                        gfp_t gfp_flags)
-{
-        void *base;
-
-        base = (void *)__get_free_pages(gfp_flags, get_order(size));
-        if (!base)
-                return NULL;
-        memset(base, 0, size);
-        *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
-                                      DMA_TO_DEVICE);
-        if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
-                free_pages((unsigned long)base, get_order(size));
-                return NULL;
-        }
-
-        return base;
-}
-
-static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
-                                      dma_addr_t *dmaaddr, size_t size)
-{
-        void *base;
-
-        base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
-                                         GFP_KERNEL);
-        if (!base) {
-                b43err(ring->dev->wl, "Failed to allocate or map pages "
-                       "for DMA ringmemory\n");
-                return NULL;
-        }
-        if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
-                /* The memory does not fit our device constraints.
-                 * Retry with GFP_DMA set to get lower memory. */
-                b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
-                                                 GFP_KERNEL | GFP_DMA);
-                if (!base) {
-                        b43err(ring->dev->wl, "Failed to allocate or map pages "
-                               "in the GFP_DMA region for DMA ringmemory\n");
-                        return NULL;
-                }
-                if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
-                        b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                        b43err(ring->dev->wl, "Failed to allocate DMA "
-                               "ringmemory that fits device constraints\n");
-                        return NULL;
-                }
-        }
-        /* We expect the memory to be 4k aligned, at least. */
-        if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
-                b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                return NULL;
-        }
-
-        return base;
-}
-
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-        unsigned int required;
-        void *base;
-        dma_addr_t dmaaddr;
-
-        /* There are several requirements to the descriptor ring memory:
-         * - The memory region needs to fit the address constraints for the
-         *   device (same as for frame buffers).
-         * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
-         * - For 64bit DMA devices, the descriptor ring must be 8k aligned. */
+        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
+         * alignment and 8K buffers for 64-bit DMA with 8K alignment.
+         * In practice we could use smaller buffers for the latter, but the
+         * alignment is really important because of the hardware bug. If bit
+         * 0x00001000 is used in DMA address, some hardware (like BCM4331)
+         * copies that bit into B43_DMA64_RXSTATUS and we get false values from
+         * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
+         * more than 256 slots for ring. */
+        u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
+                                B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
 
-        if (ring->type == B43_DMA_64BIT)
-                required = ring->nr_slots * sizeof(struct b43_dmadesc64);
-        else
-                required = ring->nr_slots * sizeof(struct b43_dmadesc32);
-        if (B43_WARN_ON(required > 0x1000))
-                return -ENOMEM;
-
-        ring->alloc_descsize = 0x1000;
-        base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
-        if (!base)
+        ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
+                                             ring_mem_size, &(ring->dmabase),
+                                             GFP_KERNEL);
+        if (!ring->descbase)
                 return -ENOMEM;
-        ring->alloc_descbase = base;
-        ring->alloc_dmabase = dmaaddr;
-
-        if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
-                /* We're on <=32bit DMA, or we already got 8k aligned memory.
-                 * That's all we need, so we're fine. */
-                ring->descbase = base;
-                ring->dmabase = dmaaddr;
-                return 0;
-        }
-        b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
-
-        /* Ok, we failed at the 8k alignment requirement.
-         * Try to force-align the memory region now. */
-        ring->alloc_descsize = 0x2000;
-        base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
-        if (!base)
-                return -ENOMEM;
-        ring->alloc_descbase = base;
-        ring->alloc_dmabase = dmaaddr;
-
-        if (is_8k_aligned(dmaaddr)) {
-                /* We're already 8k aligned. That Ok, too. */
-                ring->descbase = base;
-                ring->dmabase = dmaaddr;
-                return 0;
-        }
-        /* Force-align it to 8k */
-        ring->descbase = (void *)((u8 *)base + 0x1000);
-        ring->dmabase = dmaaddr + 0x1000;
-        B43_WARN_ON(!is_8k_aligned(ring->dmabase));
 
         return 0;
 }
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-        b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
-                                   ring->alloc_dmabase, ring->alloc_descsize);
+        u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
+                                B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
+        dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
+                          ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
@@ -643,17 +552,32 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                   dma_addr_t addr,
                                   size_t buffersize, bool dma_to_device)
 {
-        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
                 return 1;
 
-        if (!b43_dma_address_ok(ring, addr, buffersize)) {
-                /* We can't support this address. Unmap it again. */
-                unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-                return 1;
+        switch (ring->type) {
+        case B43_DMA_30BIT:
+                if ((u64)addr + buffersize > (1ULL << 30))
+                        goto address_error;
+                break;
+        case B43_DMA_32BIT:
+                if ((u64)addr + buffersize > (1ULL << 32))
+                        goto address_error;
+                break;
+        case B43_DMA_64BIT:
+                /* Currently we can't have addresses beyond
+                 * 64bit in the kernel. */
+                break;
         }
 
         /* The address is OK. */
         return 0;
+
+address_error:
+        /* We can't support this address. Unmap it again. */
+        unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+        return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +639,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
         meta->dmaaddr = dmaaddr;
         ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                    ring->rx_buffersize, 0, 0, 0);
-        ssb_dma_sync_single_for_device(ring->dev->dev,
-                                       ring->alloc_dmabase,
-                                       ring->alloc_descsize, DMA_TO_DEVICE);
 
         return 0;
 }
@@ -766,36 +687,37 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
         int err = 0;
         u32 value;
         u32 addrext;
-        u32 trans = ssb_dma_translation(ring->dev->dev);
+        bool parity = ring->dev->dma.parity;
+        u32 addrlo;
+        u32 addrhi;
 
         if (ring->tx) {
                 if (ring->type == B43_DMA_64BIT) {
                         u64 ringbase = (u64) (ring->dmabase);
+                        addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+                        addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
+                        addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
 
-                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
-                            >> SSB_DMA_TRANSLATION_SHIFT;
                         value = B43_DMA64_TXENABLE;
                         value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                             & B43_DMA64_TXADDREXT_MASK;
+                        if (!parity)
+                                value |= B43_DMA64_TXPARITYDISABLE;
                         b43_dma_write(ring, B43_DMA64_TXCTL, value);
-                        b43_dma_write(ring, B43_DMA64_TXRINGLO,
-                                      (ringbase & 0xFFFFFFFF));
-                        b43_dma_write(ring, B43_DMA64_TXRINGHI,
-                                      ((ringbase >> 32) &
-                                       ~SSB_DMA_TRANSLATION_MASK)
-                                      | (trans << 1));
+                        b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
+                        b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
                 } else {
                         u32 ringbase = (u32) (ring->dmabase);
+                        addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+                        addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
 
-                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
-                            >> SSB_DMA_TRANSLATION_SHIFT;
                         value = B43_DMA32_TXENABLE;
                         value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                             & B43_DMA32_TXADDREXT_MASK;
+                        if (!parity)
+                                value |= B43_DMA32_TXPARITYDISABLE;
                         b43_dma_write(ring, B43_DMA32_TXCTL, value);
-                        b43_dma_write(ring, B43_DMA32_TXRING,
-                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
-                                      | trans);
+                        b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
                 }
         } else {
                 err = alloc_initial_descbuffers(ring);
@@ -803,35 +725,34 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
                 if (err)
                         goto out;
                 if (ring->type == B43_DMA_64BIT) {
                         u64 ringbase = (u64) (ring->dmabase);
+                        addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+                        addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
+                        addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
 
-                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
-                            >> SSB_DMA_TRANSLATION_SHIFT;
                         value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
                         value |= B43_DMA64_RXENABLE;
                         value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                             & B43_DMA64_RXADDREXT_MASK;
+                        if (!parity)
+                                value |= B43_DMA64_RXPARITYDISABLE;
                         b43_dma_write(ring, B43_DMA64_RXCTL, value);
-                        b43_dma_write(ring, B43_DMA64_RXRINGLO,
-                                      (ringbase & 0xFFFFFFFF));
-                        b43_dma_write(ring, B43_DMA64_RXRINGHI,
-                                      ((ringbase >> 32) &
-                                       ~SSB_DMA_TRANSLATION_MASK)
-                                      | (trans << 1));
+                        b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
+                        b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
                         b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                                       sizeof(struct b43_dmadesc64));
                 } else {
                         u32 ringbase = (u32) (ring->dmabase);
+                        addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+                        addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
 
-                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
-                            >> SSB_DMA_TRANSLATION_SHIFT;
                         value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
                         value |= B43_DMA32_RXENABLE;
                         value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                             & B43_DMA32_RXADDREXT_MASK;
+                        if (!parity)
+                                value |= B43_DMA32_RXPARITYDISABLE;
                         b43_dma_write(ring, B43_DMA32_RXCTL, value);
-                        b43_dma_write(ring, B43_DMA32_RXRING,
-                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
-                                      | trans);
+                        b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
                         b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                                       sizeof(struct b43_dmadesc32));
                 }
@@ -865,14 +786,14 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
 
 static void free_all_descbuffers(struct b43_dmaring *ring)
 {
-        struct b43_dmadesc_generic *desc;
         struct b43_dmadesc_meta *meta;
         int i;
 
         if (!ring->used_slots)
                 return;
         for (i = 0; i < ring->nr_slots; i++) {
-                desc = ring->ops->idx2desc(ring, i, &meta);
+                /* get meta - ignore returned value */
+                ring->ops->idx2desc(ring, i, &meta);
 
                 if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
                         B43_WARN_ON(!ring->tx);
@@ -894,9 +815,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
         u32 tmp;
         u16 mmio_base;
 
-        tmp = b43_read32(dev, SSB_TMSHIGH);
-        if (tmp & SSB_TMSHIGH_DMA64)
-                return DMA_BIT_MASK(64);
+        switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+        case B43_BUS_BCMA:
+                tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
+                if (tmp & BCMA_IOST_DMA64)
+                        return DMA_BIT_MASK(64);
+                break;
+#endif
+#ifdef CONFIG_B43_SSB
+        case B43_BUS_SSB:
+                tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
+                if (tmp & SSB_TMSHIGH_DMA64)
+                        return DMA_BIT_MASK(64);
+                break;
+#endif
+        }
+
         mmio_base = b43_dmacontroller_base(0, 0);
         b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
         tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
@@ -953,12 +888,21 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
         else
                 ring->ops = &dma32_ops;
         if (for_tx) {
-                ring->tx = 1;
+                ring->tx = true;
                 ring->current_slot = -1;
         } else {
                 if (ring->index == 0) {
-                        ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
-                        ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
+                        switch (dev->fw.hdr_format) {
+                        case B43_FW_HDR_598:
+                                ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
+                                ring->frameoffset = B43_DMA0_RX_FW598_FO;
+                                break;
+                        case B43_FW_HDR_410:
+                        case B43_FW_HDR_351:
+                                ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
+                                ring->frameoffset = B43_DMA0_RX_FW351_FO;
+                                break;
+                        }
                 } else
                         B43_WARN_ON(1);
         }
@@ -977,10 +921,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         goto err_kfree_meta;
 
                 /* test for ability to dma to txhdr_cache */
-                dma_test = ssb_dma_map_single(dev->dev,
-                                              ring->txhdr_cache,
-                                              b43_txhdr_size(dev),
-                                              DMA_TO_DEVICE);
+                dma_test = dma_map_single(dev->dev->dma_dev,
+                                          ring->txhdr_cache,
+                                          b43_txhdr_size(dev),
+                                          DMA_TO_DEVICE);
 
                 if (b43_dma_mapping_error(ring, dma_test,
                                           b43_txhdr_size(dev), 1)) {
@@ -992,10 +936,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         if (!ring->txhdr_cache)
                                 goto err_kfree_meta;
 
-                        dma_test = ssb_dma_map_single(dev->dev,
-                                                      ring->txhdr_cache,
-                                                      b43_txhdr_size(dev),
-                                                      DMA_TO_DEVICE);
+                        dma_test = dma_map_single(dev->dev->dma_dev,
+                                                  ring->txhdr_cache,
+                                                  b43_txhdr_size(dev),
+                                                  DMA_TO_DEVICE);
 
                         if (b43_dma_mapping_error(ring, dma_test,
                                                   b43_txhdr_size(dev), 1)) {
@@ -1006,9 +950,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         }
                 }
 
-                ssb_dma_unmap_single(dev->dev,
-                                     dma_test, b43_txhdr_size(dev),
-                                     DMA_TO_DEVICE);
+                dma_unmap_single(dev->dev->dma_dev,
+                                 dma_test, b43_txhdr_size(dev),
+                                 DMA_TO_DEVICE);
         }
 
         err = alloc_ringmemory(ring);
@@ -1115,23 +1059,23 @@ void b43_dma_free(struct b43_wldev *dev)
 static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 {
         u64 orig_mask = mask;
-        bool fallback = 0;
+        bool fallback = false;
         int err;
 
         /* Try to set the DMA mask. If it fails, try falling back to a
          * lower mask, as we can always also support a lower one. */
         while (1) {
-                err = ssb_dma_set_mask(dev->dev, mask);
+                err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
                 if (!err)
                         break;
                 if (mask == DMA_BIT_MASK(64)) {
                         mask = DMA_BIT_MASK(32);
-                        fallback = 1;
+                        fallback = true;
                         continue;
                 }
                 if (mask == DMA_BIT_MASK(32)) {
                         mask = DMA_BIT_MASK(30);
-                        fallback = 1;
+                        fallback = true;
                         continue;
                 }
                 b43err(dev->wl, "The machine/kernel does not support "
@@ -1148,6 +1092,25 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
         return 0;
 }
 
+/* Some hardware with 64-bit DMA seems to be bugged and looks for translation
+ * bit in low address word instead of high one.
+ */
+static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
+                                            enum b43_dmatype type)
+{
+        if (type != B43_DMA_64BIT)
+                return 1;
+
+#ifdef CONFIG_B43_SSB
+        if (dev->dev->bus_type == B43_BUS_SSB &&
+            dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
+            !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
+              ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
+                return 1;
+#endif
+        return 0;
+}
+
 int b43_dma_init(struct b43_wldev *dev)
 {
         struct b43_dma *dma = &dev->dma;
@@ -1161,6 +1124,27 @@ int b43_dma_init(struct b43_wldev *dev)
         if (err)
                 return err;
 
+        switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+        case B43_BUS_BCMA:
+                dma->translation = bcma_core_dma_translation(dev->dev->bdev);
+                break;
+#endif
+#ifdef CONFIG_B43_SSB
+        case B43_BUS_SSB:
+                dma->translation = ssb_dma_translation(dev->dev->sdev);
+                break;
+#endif
+        }
+        dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);
+
+        dma->parity = true;
+#ifdef CONFIG_B43_BCMA
+        /* TODO: find out which SSB devices need disabling parity */
+        if (dev->dev->bus_type == B43_BUS_BCMA)
+                dma->parity = false;
+#endif
+
         err = -ENOMEM;
         /* setup TX DMA channels. */
         dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
@@ -1189,7 +1173,7 @@ int b43_dma_init(struct b43_wldev *dev)
                 goto err_destroy_mcast;
 
         /* No support for the TX status DMA ring. */
-        B43_WARN_ON(dev->dev->id.revision < 5);
+        B43_WARN_ON(dev->dev->core_rev < 5);
 
         b43dbg(dev->wl, "%u-bit DMA initialized\n",
                (unsigned int)type);
@@ -1318,20 +1302,20 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         memset(meta, 0, sizeof(*meta));
 
         meta->skb = skb;
-        meta->is_last_fragment = 1;
+        meta->is_last_fragment = true;
         priv_info->bouncebuffer = NULL;
 
         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
         /* create a bounce buffer in zone_dma on mapping failure. */
         if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-                priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+                priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
+                                                  GFP_ATOMIC | GFP_DMA);
                 if (!priv_info->bouncebuffer) {
                         ring->current_slot = old_top_slot;
                         ring->used_slots = old_used_slots;
                         err = -ENOMEM;
                         goto out_unmap_hdr;
                 }
-                memcpy(priv_info->bouncebuffer, skb->data, skb->len);
 
                 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
                 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1354,9 +1338,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         }
         /* Now transfer the whole frame. */
         wmb();
-        ssb_dma_sync_single_for_device(ring->dev->dev,
-                                       ring->alloc_dmabase,
-                                       ring->alloc_descsize, DMA_TO_DEVICE);
         ops->poke_tx(ring, next_slot(ring, slot));
         return 0;
@@ -1468,7 +1449,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
         if (unlikely(err == -ENOKEY)) {
                 /* Drop this packet, as we don't have the encryption key
                  * anymore and must not transmit it unencrypted. */
-                dev_kfree_skb_any(skb);
+                ieee80211_free_txskb(dev->wl->hw, skb);
                 err = 0;
                 goto out;
         }
@@ -1476,12 +1457,13 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
                 b43err(dev->wl, "DMA tx mapping failure\n");
                 goto out;
         }
-        ring->nr_tx_packets++;
         if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
             should_inject_overflow(ring)) {
                 /* This TX ring is full. */
-                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-                ring->stopped = 1;
+                unsigned int skb_mapping = skb_get_queue_mapping(skb);
+                ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+                dev->wl->tx_queue_stopped[skb_mapping] = 1;
+                ring->stopped = true;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                 }
@@ -1496,10 +1478,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 {
         const struct b43_dma_ops *ops;
         struct b43_dmaring *ring;
-        struct b43_dmadesc_generic *desc;
         struct b43_dmadesc_meta *meta;
+        static const struct b43_txstatus fake; /* filled with 0 */
+        const struct b43_txstatus *txstat;
         int slot, firstused;
         bool frame_succeed;
+        int skip;
+        static u8 err_out1, err_out2;
 
         ring = parse_cookie(dev, status->cookie, &slot);
         if (unlikely(!ring))
@@ -1512,19 +1497,43 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
         firstused = ring->current_slot - ring->used_slots + 1;
         if (firstused < 0)
                 firstused = ring->nr_slots + firstused;
+
+        skip = 0;
         if (unlikely(slot != firstused)) {
                 /* This possibly is a firmware bug and will result in
-                 * malfunction, memory leaks and/or stall of DMA functionality. */
-                b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
-                       "Expected %d, but got %d\n",
-                       ring->index, firstused, slot);
-                return;
+                 * malfunction, memory leaks and/or stall of DMA functionality.
+                 */
+                if (slot == next_slot(ring, next_slot(ring, firstused))) {
+                        /* If a single header/data pair was missed, skip over
+                         * the first two slots in an attempt to recover.
+                         */
+                        slot = firstused;
+                        skip = 2;
+                        if (!err_out1) {
+                                /* Report the error once. */
+                                b43dbg(dev->wl,
+                                       "Skip on DMA ring %d slot %d.\n",
+                                       ring->index, slot);
+                                err_out1 = 1;
+                        }
+                } else {
+                        /* More than a single header/data pair were missed.
+                         * Report this error once.
+                         */
+                        if (!err_out2)
+                                b43dbg(dev->wl,
                                       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
+                                       ring->index, firstused, slot);
+                        err_out2 = 1;
+                        return;
+                }
         }
 
         ops = ring->ops;
         while (1) {
                 B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
-                desc = ops->idx2desc(ring, slot, &meta);
+                /* get meta - ignore returned value */
+                ops->idx2desc(ring, slot, &meta);
 
                 if (b43_dma_ptr_is_poisoned(meta->skb)) {
                         b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
@@ -1532,11 +1541,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                               slot, firstused, ring->index);
                         break;
                 }
+
                 if (meta->skb) {
                         struct b43_private_tx_info *priv_info =
-                                b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+                             b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
 
-                        unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+                        unmap_descbuffer(ring, meta->dmaaddr,
+                                         meta->skb->len, 1);
                         kfree(priv_info->bouncebuffer);
                         priv_info->bouncebuffer = NULL;
                 } else {
@@ -1548,8 +1559,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                         struct ieee80211_tx_info *info;
 
                         if (unlikely(!meta->skb)) {
-                                /* This is a scatter-gather fragment of a frame, so
-                                 * the skb pointer must not be NULL. */
+                                /* This is a scatter-gather fragment of a frame,
+                                 * so the skb pointer must not be NULL.
+                                 */
                                 b43dbg(dev->wl, "TX status unexpected NULL skb "
                                        "at slot %d (first=%d) on ring %d\n",
                                        slot, firstused, ring->index);
@@ -1560,9 +1572,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 
                         /*
                          * Call back to inform the ieee80211 subsystem about
-                         * the status of the transmission.
+                         * the status of the transmission. When skipping over
+                         * a missed TX status report, use a status structure
+                         * filled with zeros to indicate that the frame was not
+                         * sent (frame_count 0) and not acknowledged
                          */
-                        frame_succeed = b43_fill_txstatus_report(dev, info, status);
+                        if (unlikely(skip))
+                                txstat = &fake;
+                        else
+                                txstat = status;
+
+                        frame_succeed = b43_fill_txstatus_report(dev, info,
+                                                                 txstat);
 #ifdef CONFIG_B43_DEBUG
                         if (frame_succeed)
                                 ring->nr_succeed_tx_packets++;
@@ -1590,37 +1611,32 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                 /* Everything unmapped and free'd. So it's not used anymore. */
                 ring->used_slots--;
 
-                if (meta->is_last_fragment) {
+                if (meta->is_last_fragment && !skip) {
                         /* This is the last scatter-gather
                          * fragment of the frame. We are done. */
                         break;
                 }
                 slot = next_slot(ring, slot);
+                if (skip > 0)
+                        --skip;
         }
         if (ring->stopped) {
                 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+                ring->stopped = false;
+        }
+
+        if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+                dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+        } else {
+                /* If the driver queue is running wake the corresponding
+                 * mac80211 queue. */
                 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
-                ring->stopped = 0;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                 }
         }
-}
-
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
-                          struct ieee80211_tx_queue_stats *stats)
-{
-        const int nr_queues = dev->wl->hw->queues;
-        struct b43_dmaring *ring;
-        int i;
-
-        for (i = 0; i < nr_queues; i++) {
-                ring = select_ring_by_priority(dev, i);
-
-                stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
-                stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
-                stats[i].count = ring->nr_tx_packets;
-        }
+        /* Add work to the queue. */
+        ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
 }
 
 static void dma_rx(struct b43_dmaring *ring, int *slot)
@@ -1661,7 +1677,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
                 dmaaddr = meta->dmaaddr;
                 goto drop_recycle_buffer;
         }
-        if (unlikely(len > ring->rx_buffersize)) {
+        if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
                 /* The data did not fit into one descriptor buffer
                  * and is split over multiple buffers.
                  * This should never happen, as we try to allocate buffers
@@ -1709,6 +1725,25 @@ drop_recycle_buffer:
         sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
 }
 
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
+{
+        int current_slot, previous_slot;
+
+        B43_WARN_ON(ring->tx);
+
+        /* Device has filled all buffers, drop all packets and let TCP
+         * decrease speed.
+         * Decrement RX index by one will let the device to see all slots
+         * as free again
+         */
+        /*
+         *TODO: How to increase rx_drop in mac80211?
+         */
+        current_slot = ring->ops->get_current_rxslot(ring);
+        previous_slot = prev_slot(ring, current_slot);
+        ring->ops->set_current_rxslot(ring, previous_slot);
+}
+
 void b43_dma_rx(struct b43_dmaring *ring)
 {
         const struct b43_dma_ops *ops = ring->ops;
@@ -1724,6 +1759,7 @@ void b43_dma_rx(struct b43_dmaring *ring)
                 dma_rx(ring, &slot);
                 update_max_used_slots(ring, ++used_slots);
         }
+        wmb();
         ops->set_current_rxslot(ring, slot);
         ring->current_slot = slot;
 }
@@ -1760,7 +1796,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
         b43_power_saving_ctl_bits(dev, 0);
 }
 
-#ifdef CONFIG_B43_PIO
 static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
                            u16 mmio_base, bool enable)
 {
@@ -1794,4 +1829,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
 
         mmio_base = b43_dmacontroller_base(type, engine_index);
         direct_fifo_rx(dev, type, mmio_base, enable);
 }
-#endif /* CONFIG_B43_PIO */
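For readers untangling the address-handling change above: the diff replaces the repeated open-coded mask/shift sequences with one helper, b43_dma_address(), which routes the bus "translation" bits into either the low or the high address word and derives the 2-bit address extension. The standalone sketch below illustrates that splitting scheme only; the names dma_address, TRANSLATION_MASK and TRANSLATION_SHIFT and the example values are stand-ins for this example, not the kernel's definitions (the driver uses SSB_DMA_TRANSLATION_MASK/_SHIFT and a per-bus translation value obtained from ssb or bcma).

    /* sketch.c - illustrative only; constants are assumptions, not kernel values */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define TRANSLATION_MASK  0xC0000000u  /* stand-in for SSB_DMA_TRANSLATION_MASK */
    #define TRANSLATION_SHIFT 30           /* stand-in for SSB_DMA_TRANSLATION_SHIFT */

    enum addrtype { ADDR_LOW, ADDR_HIGH, ADDR_EXT };

    /* Split a 64-bit bus address into the low word, high word and the
     * 2-bit address extension, placing the translation bits in either
     * the low or the high word depending on the hardware flavor. */
    static uint32_t dma_address(uint64_t dmaaddr, uint32_t translation,
                                bool translation_in_low, enum addrtype type)
    {
            uint32_t addr = 0;

            switch (type) {
            case ADDR_LOW:
                    addr = (uint32_t)dmaaddr;
                    if (translation_in_low) {
                            addr &= ~TRANSLATION_MASK;
                            addr |= translation;
                    }
                    break;
            case ADDR_HIGH:
                    addr = (uint32_t)(dmaaddr >> 32);
                    if (!translation_in_low) {
                            addr &= ~TRANSLATION_MASK;
                            addr |= translation;
                    }
                    break;
            case ADDR_EXT:
                    /* The extension is taken from whichever word carries
                     * the translation bits. */
                    addr = translation_in_low ? (uint32_t)dmaaddr
                                              : (uint32_t)(dmaaddr >> 32);
                    addr = (addr & TRANSLATION_MASK) >> TRANSLATION_SHIFT;
                    break;
            }
            return addr;
    }

    int main(void)
    {
            uint64_t dmaaddr = 0x00000001C0001000ull; /* example bus address */
            uint32_t trans = 0x80000000;              /* example routing bits */

            /* prints: lo=c0001000 hi=80000001 ext=0 */
            printf("lo=%08x hi=%08x ext=%x\n",
                   dma_address(dmaaddr, trans, false, ADDR_LOW),
                   dma_address(dmaaddr, trans, false, ADDR_HIGH),
                   dma_address(dmaaddr, trans, false, ADDR_EXT));
            return 0;
    }

Which word carries the translation bits is exactly the decision the diff's new b43_dma_translation_in_low_word() makes: 30/32-bit engines always keep them in the low word, while 64-bit engines normally use the high word except on the buggy cores the helper checks for.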
