Diffstat (limited to 'drivers/net/ibmveth.c')
 drivers/net/ibmveth.c | 200
 1 file changed, 115 insertions(+), 85 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a2c4dd4fb22..e5246f227c9 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,7 +96,7 @@ static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +181,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
atomic_set(&pool->available, 0);
pool->producer_index = 0;
pool->consumer_index = 0;
+ pool->active = 0;
return 0;
}
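
For orientation, the pool bookkeeping this hunk touches looks roughly as sketched below. The field list is inferred from the uses visible elsewhere in this diff, so treat it as an approximation rather than the driver's actual definition; the active flag is the new addition.

struct ibmveth_buff_pool {
	u32 size;			/* number of buffers in the pool */
	u32 index;			/* which pool this is */
	u32 buff_size;			/* size of each buffer */
	u32 threshold;			/* low-water mark for replenishing */
	atomic_t available;		/* buffers currently posted */
	u32 consumer_index;
	u32 producer_index;
	u16 *free_map;			/* indices of free slots */
	dma_addr_t *dma_addr;		/* DMA handle per slot */
	struct sk_buff **skbuff;	/* skb per slot */
	int active;			/* new in this patch: pool is in use */
};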
@@ -236,7 +237,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
if(lpar_rc != H_Success) {
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
pool->consumer_index--;
dma_unmap_single(&adapter->vdev->dev,
@@ -255,37 +256,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
atomic_add(buffers_added, &(pool->available));
}
-/* check if replenishing is needed. */
-static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
-{
- return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
- (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
- (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
-}
-
-/* kick the replenish tasklet if we need replenishing and it isn't already running */
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
-{
- if(ibmveth_is_replenishing_needed(adapter) &&
- (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
- schedule_work(&adapter->replenish_task);
- }
-}
-
-/* replenish tasklet routine */
+/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
+ int i;
+
adapter->replenish_task_cycles++;
- ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
- ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
- ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+ for(i = 0; i < IbmVethNumBufferPools; i++)
+ if(adapter->rx_buff_pool[i].active)
+ ibmveth_replenish_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
-
- atomic_inc(&adapter->not_replenishing);
-
- ibmveth_schedule_replenishing(adapter);
}
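
With the workqueue and the not_replenishing guard gone, replenishing runs synchronously from the open and poll paths and simply walks the pool array, refilling only pools marked active. A minimal sketch of that flow, reusing the names from this diff (the wrapper name is illustrative):

/* Sketch: one synchronous pass over every active pool. Inactive
 * pools must be skipped, since ibmveth_change_mtu may already have
 * freed their buffers. */
static void replenish_active_pools(struct ibmveth_adapter *adapter)
{
	int i;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);
}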
/* empty and free a buffer pool - also used to do cleanup in error paths */
@@ -293,10 +276,8 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
{
int i;
- if(pool->free_map) {
- kfree(pool->free_map);
- pool->free_map = NULL;
- }
+ kfree(pool->free_map);
+ pool->free_map = NULL;
if(pool->skbuff && pool->dma_addr) {
for(i = 0; i < pool->size; ++i) {
@@ -321,6 +302,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
kfree(pool->skbuff);
pool->skbuff = NULL;
}
+ pool->active = 0;
}
/* remove a buffer from a pool */
@@ -379,6 +361,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
ibmveth_assert(pool < IbmVethNumBufferPools);
ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ if(!adapter->rx_buff_pool[pool].active) {
+ ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+ return;
+ }
+
desc.desc = 0;
desc.fields.valid = 1;
desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
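
The pool and index checked above are unpacked from the completion's correlator. The encoding itself is not shown in this diff; conventionally in this driver the pool number rides in the upper 32 bits and the slot index in the lower 32, which the two asserts are consistent with. A hedged sketch of that decode (the accessor name is hypothetical):

/* Assumed correlator layout: pool number high, slot index low. */
u64 correlator = ibmveth_rxq_correlator(adapter);	/* hypothetical */
unsigned int pool  = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;

ibmveth_assert(pool < IbmVethNumBufferPools);
ibmveth_assert(index < adapter->rx_buff_pool[pool].size);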
@@ -409,6 +397,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
+ int i;
+
if(adapter->buffer_list_addr != NULL) {
if(!dma_mapping_error(adapter->buffer_list_dma)) {
dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +433,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->rx_queue.queue_addr = NULL;
}
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+ for(i = 0; i<IbmVethNumBufferPools; i++)
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
}
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev->priv;
u64 mac_address = 0;
- int rxq_entries;
+ int rxq_entries = 1;
unsigned long lpar_rc;
int rc;
union ibmveth_buf_desc rxq_desc;
+ int i;
ibmveth_debug_printk("open starting\n");
- rxq_entries =
- adapter->rx_buff_pool[0].size +
- adapter->rx_buff_pool[1].size +
- adapter->rx_buff_pool[2].size + 1;
+ for(i = 0; i<IbmVethNumBufferPools; i++)
+ rxq_entries += adapter->rx_buff_pool[i].size;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -502,14 +490,8 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
- ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
- ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
- {
- ibmveth_error_printk("unable to allocate buffer pools\n");
- ibmveth_cleanup(adapter);
- return -ENOMEM;
- }
+	/* call change_mtu to init the buffer pools based on the initial mtu */
+ ibmveth_change_mtu(netdev, netdev->mtu);
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
@@ -532,7 +514,7 @@ static int ibmveth_open(struct net_device *netdev)
if(lpar_rc != H_Success) {
ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
- ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
+ ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
adapter->buffer_list_dma,
adapter->filter_list_dma,
rxq_desc.desc,
@@ -552,10 +534,10 @@ static int ibmveth_open(struct net_device *netdev)
return rc;
}
- netif_start_queue(netdev);
+ ibmveth_debug_printk("initial replenish cycle\n");
+ ibmveth_replenish_task(adapter);
- ibmveth_debug_printk("scheduling initial replenish cycle\n");
- ibmveth_schedule_replenishing(adapter);
+ netif_start_queue(netdev);
ibmveth_debug_printk("open complete\n");
@@ -573,9 +555,6 @@ static int ibmveth_close(struct net_device *netdev)
free_irq(netdev->irq, netdev);
- cancel_delayed_work(&adapter->replenish_task);
- flush_scheduled_work();
-
do {
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -640,12 +619,18 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
int nfrags = 0, curfrag;
unsigned long correlator;
+ unsigned long flags;
unsigned int retry_count;
+ unsigned int tx_dropped = 0;
+ unsigned int tx_bytes = 0;
+ unsigned int tx_packets = 0;
+ unsigned int tx_send_failed = 0;
+ unsigned int tx_map_failed = 0;
+
if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
- adapter->stats.tx_dropped++;
- dev_kfree_skb(skb);
- return 0;
+ tx_dropped++;
+ goto out;
}
memset(&desc, 0, sizeof(desc));
@@ -664,10 +649,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if(dma_mapping_error(desc[0].fields.address)) {
ibmveth_error_printk("tx: unable to map initial fragment\n");
- adapter->tx_map_failed++;
- adapter->stats.tx_dropped++;
- dev_kfree_skb(skb);
- return 0;
+ tx_map_failed++;
+ tx_dropped++;
+ goto out;
}
curfrag = nfrags;
@@ -684,8 +668,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if(dma_mapping_error(desc[curfrag+1].fields.address)) {
ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
- adapter->tx_map_failed++;
- adapter->stats.tx_dropped++;
+ tx_map_failed++;
+ tx_dropped++;
/* Free all the mappings we just created */
while(curfrag < nfrags) {
dma_unmap_single(&adapter->vdev->dev,
@@ -694,8 +678,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
DMA_TO_DEVICE);
curfrag++;
}
- dev_kfree_skb(skb);
- return 0;
+ goto out;
}
}
@@ -720,11 +703,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
}
- adapter->tx_send_failed++;
- adapter->stats.tx_dropped++;
+ tx_send_failed++;
+ tx_dropped++;
} else {
- adapter->stats.tx_packets++;
- adapter->stats.tx_bytes += skb->len;
+ tx_packets++;
+ tx_bytes += skb->len;
+ netdev->trans_start = jiffies;
}
do {
@@ -733,6 +717,14 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[nfrags].fields.length, DMA_TO_DEVICE);
} while(--nfrags >= 0);
+out: spin_lock_irqsave(&adapter->stats_lock, flags);
+ adapter->stats.tx_dropped += tx_dropped;
+ adapter->stats.tx_bytes += tx_bytes;
+ adapter->stats.tx_packets += tx_packets;
+ adapter->tx_send_failed += tx_send_failed;
+ adapter->tx_map_failed += tx_map_failed;
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+
dev_kfree_skb(skb);
return 0;
}
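
Because the patch also sets NETIF_F_LLTX (see the probe hunk below), the core no longer serializes hard_start_xmit for this device, so the driver takes over its own stats locking: every exit path accumulates into locals, and the shared counters are updated exactly once, under adapter->stats_lock, at the common out: label. The overall shape as a sketch; too_many_frags() and send_frames() are hypothetical stand-ins for the fragment-mapping and hypercall logic:

static int xmit_shape(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned int tx_dropped = 0, tx_packets = 0, tx_bytes = 0;
	unsigned long flags;

	if (too_many_frags(skb)) {		/* hypothetical check */
		tx_dropped++;
		goto out;
	}

	if (send_frames(adapter, skb)) {	/* hypothetical send */
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}
out:
	/* one locked update per packet, covering every exit path */
	spin_lock_irqsave(&adapter->stats_lock, flags);
	adapter->stats.tx_dropped += tx_dropped;
	adapter->stats.tx_packets += tx_packets;
	adapter->stats.tx_bytes += tx_bytes;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);	/* freed exactly once, on every path */
	return 0;
}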
@@ -776,13 +768,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
adapter->stats.rx_packets++;
adapter->stats.rx_bytes += length;
frames_processed++;
+ netdev->last_rx = jiffies;
}
} else {
more_work = 0;
}
} while(more_work && (frames_processed < max_frames_to_process));
- ibmveth_schedule_replenishing(adapter);
+ ibmveth_replenish_task(adapter);
if(more_work) {
/* more work to do - return that we are not done yet */
@@ -883,17 +876,54 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
- if ((new_mtu < 68) || (new_mtu > (1<<20)))
+ struct ibmveth_adapter *adapter = dev->priv;
+ int i;
+ int prev_smaller = 1;
+
+ if ((new_mtu < 68) ||
+ (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
return -EINVAL;
+
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ int activate = 0;
+ if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
+ activate = 1;
+ prev_smaller= 1;
+ } else {
+ if (prev_smaller)
+ activate = 1;
+ prev_smaller= 0;
+ }
+
+ if (activate && !adapter->rx_buff_pool[i].active) {
+ struct ibmveth_buff_pool *pool =
+ &adapter->rx_buff_pool[i];
+ if(ibmveth_alloc_buffer_pool(pool)) {
+ ibmveth_error_printk("unable to alloc pool\n");
+ return -ENOMEM;
+ }
+ adapter->rx_buff_pool[i].active = 1;
+ } else if (!activate && adapter->rx_buff_pool[i].active) {
+ adapter->rx_buff_pool[i].active = 0;
+ h_free_logical_lan_buffer(adapter->vdev->unit_address,
+ (u64)pool_size[i]);
+ }
+
+ }
+
+ /* kick the interrupt handler so that the new buffer pools get
+ replenished or deallocated */
+ ibmveth_interrupt(dev->irq, dev, NULL);
+
dev->mtu = new_mtu;
return 0;
}
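
The activation rule that the prev_smaller bookkeeping implements can be stated more directly: every pool whose buffers are too small for the new MTU stays active (small frames still use it), the first pool large enough for a full MTU frame is activated, and all larger pools are shut off, their posted buffers reclaimed via h_free_logical_lan_buffer; the direct call to ibmveth_interrupt then drives the actual harvesting and replenishing. An equivalent predicate, as a sketch:

/* Sketch: a pool should be active iff its index is at or below
 * that of the first pool able to hold a full MTU frame. */
static int pool_should_be_active(int i, int new_mtu)
{
	int first_fit = 0;

	while (first_fit < IbmVethNumBufferPools &&
	       new_mtu > pool_size[first_fit] - IBMVETH_BUFF_OH)
		first_fit++;

	return i <= first_fit;
}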
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
- int rc;
+ int rc, i;
struct net_device *netdev;
- struct ibmveth_adapter *adapter;
+ struct ibmveth_adapter *adapter = NULL;
unsigned char *mac_addr_p;
unsigned int *mcastFilterSize_p;
@@ -960,23 +990,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
netdev->ethtool_ops = &netdev_ethtool_ops;
netdev->change_mtu = ibmveth_change_mtu;
SET_NETDEV_DEV(netdev, &dev->dev);
+ netdev->features |= NETIF_F_LLTX;
+ spin_lock_init(&adapter->stats_lock);
memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
- ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
- ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
- ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+ for(i = 0; i<IbmVethNumBufferPools; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i]);
ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
- INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
-
adapter->buffer_list_dma = DMA_ERROR_CODE;
adapter->filter_list_dma = DMA_ERROR_CODE;
adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
- atomic_set(&adapter->not_replenishing, 1);
-
ibmveth_debug_printk("registering netdev...\n");
rc = register_netdev(netdev);
@@ -1146,14 +1174,16 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
{ "network", "IBM,l-lan"},
{ "", "" }
};
-
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct vio_driver ibmveth_driver = {
- .name = (char *)ibmveth_driver_name,
- .id_table = ibmveth_device_table,
- .probe = ibmveth_probe,
- .remove = ibmveth_remove
+ .id_table = ibmveth_device_table,
+ .probe = ibmveth_probe,
+ .remove = ibmveth_remove,
+ .driver = {
+ .name = ibmveth_driver_name,
+ .owner = THIS_MODULE,
+ }
};
static int __init ibmveth_module_init(void)