Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--   drivers/net/wireless/b43/dma.c | 58
1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index ca70267d8dd..1d7982afc0a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -409,15 +409,16 @@ static inline
                                        struct b43_dmadesc_meta *meta)
 {
         if (meta->skb) {
-                dev_kfree_skb_any(meta->skb);
+                if (ring->tx)
+                        ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
+                else
+                        dev_kfree_skb_any(meta->skb);
                 meta->skb = NULL;
         }
 }
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-        gfp_t flags = GFP_KERNEL;
-
         /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
          * alignment and 8K buffers for 64-bit DMA with 8K alignment.
          * In practice we could use smaller buffers for the latter, but the
@@ -430,14 +431,11 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
         u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
                                 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
 
-        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
-                                            ring_mem_size, &(ring->dmabase),
-                                            flags);
-        if (!ring->descbase) {
-                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+        ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
+                                             ring_mem_size, &(ring->dmabase),
+                                             GFP_KERNEL);
+        if (!ring->descbase)
                 return -ENOMEM;
-        }
-        memset(ring->descbase, 0, ring_mem_size);
 
         return 0;
 }
@@ -890,7 +888,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
         else
                 ring->ops = &dma32_ops;
         if (for_tx) {
-                ring->tx = 1;
+                ring->tx = true;
                 ring->current_slot = -1;
         } else {
                 if (ring->index == 0) {
@@ -1061,26 +1059,23 @@ void b43_dma_free(struct b43_wldev *dev)
 static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 {
         u64 orig_mask = mask;
-        bool fallback = 0;
+        bool fallback = false;
         int err;
 
         /* Try to set the DMA mask. If it fails, try falling back to a
          * lower mask, as we can always also support a lower one. */
         while (1) {
-                err = dma_set_mask(dev->dev->dma_dev, mask);
-                if (!err) {
-                        err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-                        if (!err)
-                                break;
-                }
+                err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+                if (!err)
+                        break;
                 if (mask == DMA_BIT_MASK(64)) {
                         mask = DMA_BIT_MASK(32);
-                        fallback = 1;
+                        fallback = true;
                         continue;
                 }
                 if (mask == DMA_BIT_MASK(32)) {
                         mask = DMA_BIT_MASK(30);
-                        fallback = 1;
+                        fallback = true;
                         continue;
                 }
                 b43err(dev->wl, "The machine/kernel does not support "
@@ -1109,7 +1104,7 @@ static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
 #ifdef CONFIG_B43_SSB
         if (dev->dev->bus_type == B43_BUS_SSB &&
             dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
-            !(dev->dev->sdev->bus->host_pci->is_pcie &&
+            !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
               ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
                         return 1;
 #endif
@@ -1307,7 +1302,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         memset(meta, 0, sizeof(*meta));
 
         meta->skb = skb;
-        meta->is_last_fragment = 1;
+        meta->is_last_fragment = true;
         priv_info->bouncebuffer = NULL;
 
         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1454,7 +1449,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
         if (unlikely(err == -ENOKEY)) {
                 /* Drop this packet, as we don't have the encryption key
                  * anymore and must not transmit it unencrypted. */
-                dev_kfree_skb_any(skb);
+                ieee80211_free_txskb(dev->wl->hw, skb);
                 err = 0;
                 goto out;
         }
@@ -1465,8 +1460,10 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
         if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
             should_inject_overflow(ring)) {
                 /* This TX ring is full. */
-                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-                ring->stopped = 1;
+                unsigned int skb_mapping = skb_get_queue_mapping(skb);
+                ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+                dev->wl->tx_queue_stopped[skb_mapping] = 1;
+                ring->stopped = true;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                 }
@@ -1625,12 +1622,21 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
         }
         if (ring->stopped) {
                 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+                ring->stopped = false;
+        }
+
+        if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+                dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+        } else {
+                /* If the driver queue is running wake the corresponding
+                 * mac80211 queue. */
                 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
-                ring->stopped = 0;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                 }
         }
+        /* Add work to the queue. */
+        ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
 }
 
 static void dma_rx(struct b43_dmaring *ring, int *slot)
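For context beyond the patch itself, the fragment below is a minimal sketch of the mask-fallback pattern that b43_dma_set_mask() now builds on: dma_set_mask_and_coherent() configures the streaming and coherent DMA masks in a single call, and the caller simply retries with a narrower mask when that call fails. The helper name set_widest_dma_mask() and its signature are illustrative only, not part of the b43 driver.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative helper (not in the driver): try a 64-bit DMA mask first
 * and fall back to 32-bit and then 30-bit masks, in the same order as
 * the loop in b43_dma_set_mask(). Returns 0 and stores the accepted
 * mask, or -EOPNOTSUPP if even the narrowest mask is rejected. */
static int set_widest_dma_mask(struct device *dma_dev, u64 *chosen_mask)
{
        u64 mask = DMA_BIT_MASK(64);

        while (1) {
                /* Sets both the streaming and the coherent mask at once,
                 * replacing the separate dma_set_mask() and
                 * dma_set_coherent_mask() calls used before the patch. */
                if (!dma_set_mask_and_coherent(dma_dev, mask)) {
                        *chosen_mask = mask;
                        return 0;
                }
                if (mask == DMA_BIT_MASK(64))
                        mask = DMA_BIT_MASK(32);
                else if (mask == DMA_BIT_MASK(32))
                        mask = DMA_BIT_MASK(30);
                else
                        return -EOPNOTSUPP;
        }
}

The fallback order (64-bit, then 32-bit, then 30-bit) mirrors the loop in the patch above; the sketch only differs in returning the accepted mask instead of recording whether a fallback occurred.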
