Diffstat (limited to 'drivers/net/ethernet/ibm/ehea/ehea_main.c')
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 684
1 file changed, 245 insertions(+), 439 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index dfefe809c48..a0b418e007a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_main.c
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
*
* eHEA ethernet device driver for IBM eServer System p
*
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -61,10 +62,7 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs;
-static int use_lro;
-static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
-static int num_tx_qps = EHEA_NUM_TX_QP;
+static int use_mcs = 1;
static int prop_carrier_state;
module_param(msg_level, int, 0);
@@ -74,45 +72,49 @@ module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
-module_param(use_lro, int, 0);
-module_param(lro_max_aggr, int, 0);
-module_param(num_tx_qps, int, 0);
-MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
-
-MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
- __MODULE_STRING(EHEA_LRO_MAX_AGGR));
-MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
- "Default = 0");
+MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
+ "Default = 1");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
-static int __devinit ehea_probe_adapter(struct platform_device *dev,
- const struct of_device_id *id);
+static int ehea_probe_adapter(struct platform_device *dev);
-static int __devexit ehea_remove(struct platform_device *dev);
+static int ehea_remove(struct platform_device *dev);
+
+static struct of_device_id ehea_module_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {
+ .type = "network",
+ .compatible = "IBM,lhea-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
static struct of_device_id ehea_device_table[] = {
{
@@ -121,9 +123,8 @@ static struct of_device_id ehea_device_table[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, ehea_device_table);
-static struct of_platform_driver ehea_driver = {
+static struct platform_driver ehea_driver = {
.driver = {
.name = "ehea",
.owner = THIS_MODULE,
@@ -144,7 +145,7 @@ void ehea_dump(void *adr, int len, char *msg)
}
}
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
{
if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
schedule_work(&port->reset_task);
@@ -173,7 +174,7 @@ static void ehea_update_firmware_handles(void)
continue;
num_ports++;
- num_portres += port->num_def_qps + port->num_add_tx_qps;
+ num_portres += port->num_def_qps;
}
}
@@ -199,9 +200,7 @@ static void ehea_update_firmware_handles(void)
(num_ports == 0))
continue;
- for (l = 0;
- l < port->num_def_qps + port->num_add_tx_qps;
- l++) {
+ for (l = 0; l < port->num_def_qps; l++) {
struct ehea_port_res *pr = &port->port_res[l];
arr[i].adh = adapter->handle;
@@ -303,16 +302,18 @@ static void ehea_update_bcmc_registrations(void)
arr[i].adh = adapter->handle;
arr[i].port_id = port->logical_port_id;
- arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
- EHEA_BCMC_MULTICAST |
+ arr[i].reg_type = EHEA_BCMC_MULTICAST |
EHEA_BCMC_UNTAGGED;
+ if (mc_entry->macaddr == 0)
+ arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
arr[i++].macaddr = mc_entry->macaddr;
arr[i].adh = adapter->handle;
arr[i].port_id = port->logical_port_id;
- arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
- EHEA_BCMC_MULTICAST |
+ arr[i].reg_type = EHEA_BCMC_MULTICAST |
EHEA_BCMC_VLANID_ALL;
+ if (mc_entry->macaddr == 0)
+ arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
arr[i++].macaddr = mc_entry->macaddr;
num_registrations -= 2;
}
@@ -327,10 +328,10 @@ out:
spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
-static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct ehea_port *port = netdev_priv(dev);
- struct net_device_stats *stats = &port->stats;
u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
int i;
@@ -339,7 +340,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
rx_bytes += port->port_res[i].rx_bytes;
}
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
tx_bytes += port->port_res[i].tx_bytes;
}
@@ -349,7 +350,9 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
- return &port->stats;
+ stats->multicast = port->stats.multicast;
+ stats->rx_errors = port->stats.rx_errors;
+ return stats;
}
static void ehea_update_stats(struct work_struct *work)
@@ -357,7 +360,7 @@ static void ehea_update_stats(struct work_struct *work)
struct ehea_port *port =
container_of(work, struct ehea_port, stats_work.work);
struct net_device *dev = port->netdev;
- struct net_device_stats *stats = &port->stats;
+ struct rtnl_link_stats64 *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
u64 hret;
@@ -384,7 +387,8 @@ static void ehea_update_stats(struct work_struct *work)
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -410,7 +414,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
- netdev_info(dev, "Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -440,10 +443,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i]) {
- netdev_info(dev, "Not enough memory to allocate skb array\n");
+ if (!skb_arr_rq1[i])
break;
- }
}
/* Ring doorbell */
ehea_update_rq1a(pr->qp, i - 1);
@@ -490,7 +491,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
skb_arr[index] = skb;
tmp_addr = ehea_map_vaddr(skb->data);
if (tmp_addr == -1) {
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
q_skba->os_skbs = fill_wqes - i;
ret = 0;
break;
@@ -551,7 +552,8 @@ static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
}
static inline void ehea_fill_skb(struct net_device *dev,
- struct sk_buff *skb, struct ehea_cqe *cqe)
+ struct sk_buff *skb, struct ehea_cqe *cqe,
+ struct ehea_port_res *pr)
{
int length = cqe->num_bytes_transfered - 4; /*remove CRC */
@@ -565,6 +567,8 @@ static inline void ehea_fill_skb(struct net_device *dev,
skb->csum = csum_unfold(~cqe->inet_checksum_value);
} else
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
@@ -657,49 +661,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
return 0;
}
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- struct ehea_cqe *cqe = priv;
- unsigned int ip_len;
- struct iphdr *iph;
-
- /* non tcp/udp packets */
- if (!cqe->header_length)
- return -1;
-
- /* non tcp packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if ip header and tcp header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
-static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
- struct sk_buff *skb)
-{
- if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
- __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
-
- if (skb->dev->features & NETIF_F_LRO)
- lro_receive_skb(&pr->lro_mgr, skb, cqe);
- else
- netif_receive_skb(skb);
-}
-
static int ehea_proc_rwqes(struct net_device *dev,
struct ehea_port_res *pr,
int budget)
@@ -743,14 +704,12 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb) {
- netdev_err(dev, "Not enough memory to allocate skb\n");
+ if (!skb)
break;
- }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
} else if (rq == 2) {
/* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
@@ -760,7 +719,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
"rq2: skb=NULL\n");
break;
}
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
processed_rq2++;
} else {
/* RQ3 */
@@ -771,12 +730,17 @@ static int ehea_proc_rwqes(struct net_device *dev,
"rq3: skb=NULL\n");
break;
}
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
processed_rq3++;
}
processed_bytes += skb->len;
- ehea_proc_skb(pr, cqe, skb);
+
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ cqe->vlan_tag);
+
+ napi_gro_receive(&pr->napi, skb);
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -787,8 +751,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (dev->features & NETIF_F_LRO)
- lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
pr->rx_bytes += processed_bytes;
@@ -806,7 +768,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
pr->sq_restart_flag = 0;
}
@@ -819,7 +781,7 @@ static void check_sqs(struct ehea_port *port)
int swqe_index;
int i, k;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int ret;
k = 0;
@@ -857,7 +819,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
int cqe_counter = 0;
int swqe_av = 0;
int index;
- unsigned long flags;
+ struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
+ pr - &pr->port->port_res[0]);
cqe = ehea_poll_cq(send_cq);
while (cqe && (quota > 0)) {
@@ -894,7 +857,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
skb = pr->sq_skba.arr[index];
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
pr->sq_skba.arr[index] = NULL;
}
@@ -907,20 +870,20 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
ehea_update_feca(send_cq, cqe_counter);
atomic_add(swqe_av, &pr->swqe_avail);
- spin_lock_irqsave(&pr->netif_queue, flags);
-
- if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
- >= pr->swqe_refill_th)) {
- netif_wake_queue(pr->port->netdev);
- pr->queue_stopped = 0;
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+
wake_up(&pr->port->swqe_avail_wq);
return cqe;
}
-#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
static int ehea_poll(struct napi_struct *napi, int budget)
@@ -930,18 +893,13 @@ static int ehea_poll(struct napi_struct *napi, int budget)
struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL;
- int force_irq, wqe_index;
+ int wqe_index;
int rx = 0;
- force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+ rx += ehea_proc_rwqes(dev, pr, budget - rx);
- if (!force_irq)
- rx += ehea_proc_rwqes(dev, pr, budget - rx);
-
- while ((rx != budget) || force_irq) {
- pr->poll_counter = 0;
- force_irq = 0;
+ while (rx != budget) {
napi_complete(napi);
ehea_reset_cq_ep(pr->recv_cq);
ehea_reset_cq_ep(pr->send_cq);
@@ -961,7 +919,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
rx += ehea_proc_rwqes(dev, pr, budget - rx);
}
- pr->poll_counter++;
return rx;
}
@@ -1113,13 +1070,6 @@ int ehea_sense_port_attr(struct ehea_port *port)
goto out_free;
}
- port->num_tx_qps = num_tx_qps;
-
- if (port->num_def_qps >= port->num_tx_qps)
- port->num_add_tx_qps = 0;
- else
- port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
-
ret = 0;
out_free:
if (ret || netif_msg_probe(port))
@@ -1251,7 +1201,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
netif_info(port, link, dev,
"Logical port down\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
@@ -1282,7 +1232,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
case EHEA_EC_PORT_MALFUNC:
netdev_info(dev, "Port malfunction\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
break;
default:
netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
@@ -1348,7 +1298,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
ehea_qp_aff_irq_handler,
- IRQF_DISABLED, port->int_aff_name, port);
+ 0, port->int_aff_name, port);
if (ret) {
netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
port->qp_eq->attr.ist1);
@@ -1360,14 +1310,13 @@ static int ehea_reg_interrupts(struct net_device *dev)
port->qp_eq->attr.ist1);
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
pr = &port->port_res[i];
snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
"%s-queue%d", dev->name, i);
ret = ibmebus_request_irq(pr->eq->attr.ist1,
ehea_recv_irq_handler,
- IRQF_DISABLED, pr->int_send_name,
- pr);
+ 0, pr->int_send_name, pr);
if (ret) {
netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
i, pr->eq->attr.ist1);
@@ -1403,7 +1352,7 @@ static void ehea_free_interrupts(struct net_device *dev)
/* send */
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
pr = &port->port_res[i];
ibmebus_free_irq(pr->eq->attr.ist1, pr);
netif_info(port, intr, dev,
@@ -1466,7 +1415,7 @@ out:
return ret;
}
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
{
int ret;
struct ehea_adapter *adapter = pr->port->adapter;
@@ -1488,7 +1437,7 @@ out:
return -EIO;
}
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
{
if ((ehea_rem_mr(&pr->send_mr)) ||
(ehea_rem_mr(&pr->recv_mr)))
@@ -1534,8 +1483,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->rx_packets = rx_packets;
pr->port = port;
- spin_lock_init(&pr->xmit_lock);
- spin_lock_init(&pr->netif_queue);
pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
if (!pr->eq) {
@@ -1626,15 +1573,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
- pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
- pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- pr->lro_mgr.lro_arr = pr->lro_desc;
- pr->lro_mgr.get_skb_header = get_skb_hdr;
- pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- pr->lro_mgr.dev = port->netdev;
- pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
ret = 0;
goto out;
@@ -1691,96 +1629,35 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
return ret;
}
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->ip_start = skb_network_offset(skb);
- swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
-static void write_swqe2_TSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
-{
- struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
- u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb_headlen(skb);
- int headersize;
-
- /* Packet is TCP with TSO enabled */
- swqe->tx_control |= EHEA_SWQE_TSO;
- swqe->mss = skb_shinfo(skb)->gso_size;
- /* copy only eth/ip/tcp headers to immediate data and
- * the rest of skb->data to sg1entry
- */
- headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-
- skb_data_size = skb_headlen(skb);
-
- if (skb_data_size >= headersize) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, headersize);
- swqe->immediate_data_length = headersize;
-
- if (skb_data_size > headersize) {
- /* set sg1entry data */
- sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - headersize;
- sg1entry->vaddr =
- ehea_map_vaddr(skb->data + headersize);
- swqe->descriptors++;
- }
- } else
- pr_err("cannot handle fragmented headers\n");
-}
-
-static void write_swqe2_nonTSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
+static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
+ u32 lkey)
{
int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ unsigned int immediate_len = SWQE2_MAX_IMM;
- /* Packet is any nonTSO type
- *
- * Copy as much as possible skb->data to immediate data and
- * the rest to sg1entry
- */
- if (skb_data_size >= SWQE2_MAX_IMM) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+ swqe->descriptors = 0;
- swqe->immediate_data_length = SWQE2_MAX_IMM;
+ if (skb_is_gso(skb)) {
+ swqe->tx_control |= EHEA_SWQE_TSO;
+ swqe->mss = skb_shinfo(skb)->gso_size;
+ /*
+ * For TSO packets we only copy the headers into the
+ * immediate area.
+ */
+ immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ }
- if (skb_data_size > SWQE2_MAX_IMM) {
- /* copy sg1entry data */
+ if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
+ skb_copy_from_linear_data(skb, imm_data, immediate_len);
+ swqe->immediate_data_length = immediate_len;
+
+ if (skb_data_size > immediate_len) {
sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+ sg1entry->len = skb_data_size - immediate_len;
sg1entry->vaddr =
- ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+ ehea_map_vaddr(skb->data + immediate_len);
swqe->descriptors++;
}
} else {
@@ -1799,13 +1676,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
- swqe->descriptors = 0;
sg1entry_contains_frag_data = 0;
- if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
- write_swqe2_TSO(skb, swqe, lkey);
- else
- write_swqe2_nonTSO(skb, swqe, lkey);
+ write_swqe2_immediate(skb, swqe, lkey);
/* write descriptors */
if (nfrags > 0) {
@@ -1815,7 +1688,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
/* copy sg1entry data */
sg1entry->l_key = lkey;
- sg1entry->len = frag->size;
+ sg1entry->len = skb_frag_size(frag);
sg1entry->vaddr =
ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
@@ -1828,7 +1701,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sgentry = &sg_list[i - sg1entry_contains_frag_data];
sgentry->l_key = lkey;
- sgentry->len = frag->size;
+ sgentry->len = skb_frag_size(frag);
sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
}
@@ -1974,8 +1847,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
u64 hret;
u8 reg_type;
- reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
- | EHEA_BCMC_UNTAGGED;
+ reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
+ if (mc_mac_addr == 0)
+ reg_type |= EHEA_BCMC_SCOPE_ALL;
hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
port->logical_port_id,
@@ -1983,8 +1857,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
if (hret)
goto out;
- reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
- | EHEA_BCMC_VLANID_ALL;
+ reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
+ if (mc_mac_addr == 0)
+ reg_type |= EHEA_BCMC_SCOPE_ALL;
hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
port->logical_port_id,
@@ -2034,7 +1909,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
netdev_err(dev,
"failed enabling IFF_ALLMULTI\n");
}
- } else
+ } else {
if (!enable) {
/* Disable ALLMULTI */
hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
@@ -2044,6 +1919,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
netdev_err(dev,
"failed disabling IFF_ALLMULTI\n");
}
+ }
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
@@ -2052,10 +1928,8 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
u64 hret;
ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
- if (!ehea_mcl_entry) {
- pr_err("no mem for mcl_entry\n");
+ if (!ehea_mcl_entry)
return;
- }
INIT_LIST_HEAD(&ehea_mcl_entry->list);
@@ -2077,11 +1951,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
struct netdev_hw_addr *ha;
int ret;
- if (port->promisc) {
- ehea_promiscuous(dev, 1);
- return;
- }
- ehea_promiscuous(dev, 0);
+ ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
if (dev->flags & IFF_ALLMULTI) {
ehea_allmulti(dev, 1);
@@ -2120,41 +1990,44 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
- struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- /* IPv4 */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
- write_ip_start_end(swqe, skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
- if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
- else
- write_udp_offset_end(swqe, skb);
- } else if (iph->protocol == IPPROTO_TCP) {
- write_tcp_offset_end(swqe, skb);
- }
+ swqe->ip_start = skb_network_offset(skb);
+ swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
- /* icmp (big data) and ip segmentation packets (all other ip
- packets) do not require any special handling */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct udphdr, check);
+ break;
+
+ case IPPROTO_TCP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct tcphdr, check);
+ break;
}
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ xmit_common(skb, swqe);
write_swqe2_data(skb, dev, swqe, lkey);
}
@@ -2162,105 +2035,30 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
struct ehea_swqe *swqe)
{
- int nfrags = skb_shinfo(skb)->nr_frags;
u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
- skb_frag_t *frag;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
-
- /* IPv4 */
- write_ip_start_end(swqe, skb);
- if (iph->protocol == IPPROTO_TCP) {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
+ xmit_common(skb, swqe);
- write_tcp_offset_end(swqe, skb);
-
- } else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT;
- else {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
-
- write_udp_offset_end(swqe, skb);
- }
- } else {
- /* icmp (big data) and
- ip segmentation packets (all other ip packets) */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- /* copy (immediate) data */
- if (nfrags == 0) {
- /* data is in a single piece */
+ if (!skb->data_len)
skb_copy_from_linear_data(skb, imm_data, skb->len);
- } else {
- /* first copy data from the skb->data buffer ... */
- skb_copy_from_linear_data(skb, imm_data,
- skb_headlen(skb));
- imm_data += skb_headlen(skb);
+ else
+ skb_copy_bits(skb, 0, imm_data, skb->len);
- /* ... then copy data from the fragments */
- for (i = 0; i < nfrags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- memcpy(imm_data, skb_frag_address(frag), frag->size);
- imm_data += frag->size;
- }
- }
swqe->immediate_data_length = skb->len;
- dev_kfree_skb(skb);
-}
-
-static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
-{
- struct tcphdr *tcp;
- u32 tmp;
-
- if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr *)(skb_network_header(skb) +
- (ip_hdr(skb)->ihl * 4));
- tmp = (tcp->source + (tcp->dest << 16)) % 31;
- tmp += ip_hdr(skb)->daddr % 31;
- return tmp % num_qps;
- } else
- return 0;
+ dev_consume_skb_any(skb);
}
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_swqe *swqe;
- unsigned long flags;
u32 lkey;
int swqe_index;
struct ehea_port_res *pr;
+ struct netdev_queue *txq;
- pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
-
- if (!spin_trylock(&pr->xmit_lock))
- return NETDEV_TX_BUSY;
-
- if (pr->queue_stopped) {
- spin_unlock(&pr->xmit_lock);
- return NETDEV_TX_BUSY;
- }
+ pr = &port->port_res[skb_get_queue_mapping(skb)];
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
swqe = ehea_get_swqe(pr->qp, &swqe_index);
memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -2310,38 +2108,33 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
ehea_dump(swqe, 512, "swqe");
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
swqe->tx_control |= EHEA_SWQE_PURGE;
}
ehea_post_swqe(pr->qp, swqe);
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- spin_lock_irqsave(&pr->netif_queue, flags);
- if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- pr->p_stats.queue_stopped++;
- netif_stop_queue(dev);
- pr->queue_stopped = 1;
- }
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+ pr->p_stats.queue_stopped++;
+ netif_tx_stop_queue(txq);
}
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
- spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
}
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2349,6 +2142,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2357,24 +2151,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
- return;
+ return err;
}
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2382,6 +2180,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2390,13 +2189,16 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
+ return err;
}
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
int ret = -EIO;
u64 hret;
@@ -2471,8 +2273,7 @@ out:
return ret;
}
-static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
- int add_tx_qps)
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
int ret, i;
struct port_res_cfg pr_cfg, pr_cfg_small_rx;
@@ -2505,7 +2306,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
if (ret)
goto out_clean_pr;
}
- for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+ for (i = def_qps; i < def_qps; i++) {
ret = ehea_init_port_res(port, &port->port_res[i],
&pr_cfg_small_rx, i);
if (ret)
@@ -2528,7 +2329,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
int ret = 0;
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
ret |= ehea_clean_portres(port, &port->port_res[i]);
ret |= ehea_destroy_eq(port->qp_eq);
@@ -2560,8 +2361,7 @@ static int ehea_up(struct net_device *dev)
if (port->state == EHEA_PORT_UP)
return 0;
- ret = ehea_port_res_setup(port, port->num_def_qps,
- port->num_add_tx_qps);
+ ret = ehea_port_res_setup(port, port->num_def_qps);
if (ret) {
netdev_err(dev, "port_res_failed\n");
goto out;
@@ -2580,7 +2380,7 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
netdev_err(dev, "activate_qp failed\n");
@@ -2626,7 +2426,7 @@ static void port_napi_disable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
napi_disable(&port->port_res[i].napi);
}
@@ -2634,7 +2434,7 @@ static void port_napi_enable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
napi_enable(&port->port_res[i].napi);
}
@@ -2650,11 +2450,12 @@ static int ehea_open(struct net_device *dev)
ret = ehea_up(dev);
if (!ret) {
port_napi_enable(port);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
@@ -2668,6 +2469,7 @@ static int ehea_down(struct net_device *dev)
return 0;
ehea_drop_multicast_list(dev);
+ ehea_allmulti(dev, 0);
ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
ehea_free_interrupts(dev);
@@ -2696,7 +2498,7 @@ static int ehea_stop(struct net_device *dev)
cancel_work_sync(&port->reset_task);
cancel_delayed_work_sync(&port->stats_work);
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
port_napi_disable(port);
ret = ehea_down(dev);
mutex_unlock(&port->port_lock);
@@ -2722,7 +2524,7 @@ static void ehea_flush_sq(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
int ret;
@@ -2738,7 +2540,7 @@ static void ehea_flush_sq(struct ehea_port *port)
}
}
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2756,7 +2558,7 @@ int ehea_stop_qps(struct net_device *dev)
goto out;
}
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
struct ehea_qp *qp = pr->qp;
@@ -2807,7 +2609,7 @@ out:
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2840,7 +2642,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
}
}
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2858,7 +2660,7 @@ int ehea_restart_qps(struct net_device *dev)
goto out;
}
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
struct ehea_qp *qp = pr->qp;
@@ -2920,7 +2722,7 @@ static void ehea_reset_port(struct work_struct *work)
mutex_lock(&dlpar_mem_lock);
port->resets++;
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
port_napi_disable(port);
@@ -2936,7 +2738,7 @@ static void ehea_reset_port(struct work_struct *work)
port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
out:
mutex_unlock(&port->port_lock);
mutex_unlock(&dlpar_mem_lock);
@@ -2963,7 +2765,7 @@ static void ehea_rereg_mrs(void)
if (dev->flags & IFF_UP) {
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
ehea_flush_sq(port);
ret = ehea_stop_qps(dev);
if (ret) {
@@ -3008,7 +2810,7 @@ static void ehea_rereg_mrs(void)
if (!ret) {
check_sqs(port);
port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
} else {
netdev_err(dev, "Unable to restart QPS\n");
}
@@ -3031,7 +2833,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
ehea_schedule_port_reset(port);
}
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
struct hcp_query_ehea *cb;
u64 hret;
@@ -3059,7 +2861,7 @@ out:
return ret;
}
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
struct hcp_ehea_port_cb4 *cb4;
u64 hret;
@@ -3112,7 +2914,7 @@ static ssize_t ehea_show_port_id(struct device *dev,
static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
NULL);
-static void __devinit logical_port_release(struct device *dev)
+static void logical_port_release(struct device *dev)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
of_node_put(port->ofdev.dev.of_node);
@@ -3163,7 +2965,7 @@ static const struct net_device_ops ehea_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ehea_netpoll,
#endif
- .ndo_get_stats = ehea_get_stats,
+ .ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ehea_set_multicast_list,
@@ -3173,7 +2975,7 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_tx_timeout = ehea_tx_watchdog,
};
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
u32 logical_port_id,
struct device_node *dn)
{
@@ -3184,10 +2986,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
int jumbo;
/* allocate memory for the port structures */
- dev = alloc_etherdev(sizeof(struct ehea_port));
+ dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
if (!dev) {
- pr_err("no mem for net_device\n");
ret = -ENOMEM;
goto out_err;
}
@@ -3216,6 +3017,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
if (ret)
goto out_free_mc_list;
+ netif_set_real_num_rx_queues(dev, port->num_def_qps);
+ netif_set_real_num_tx_queues(dev, port->num_def_qps);
+
port_dev = ehea_register_port(port, dn);
if (!port_dev)
goto out_free_mc_list;
@@ -3228,17 +3032,16 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
- | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
- | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
- | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
- | NETIF_F_LLTX | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
+ dev->features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
- if (use_lro)
- dev->features |= NETIF_F_LRO;
-
INIT_WORK(&port->reset_task, ehea_reset_port);
INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
@@ -3252,8 +3055,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
goto out_unreg_port;
}
- port->lro_max_aggr = lro_max_aggr;
-
ret = ehea_get_jumboframe_status(port, &jumbo);
if (ret)
netdev_err(dev, "failed determining jumbo frame status\n");
@@ -3444,7 +3245,7 @@ static ssize_t ehea_remove_port(struct device *dev,
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
{
int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
if (ret)
@@ -3455,25 +3256,25 @@ out:
return ret;
}
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_probe_port);
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
-static int __devinit ehea_probe_adapter(struct platform_device *dev,
- const struct of_device_id *id)
+static int ehea_probe_adapter(struct platform_device *dev)
{
struct ehea_adapter *adapter;
const u64 *adapter_handle;
int ret;
+ int i;
if (!dev || !dev->dev.of_node) {
pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
}
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
ret = -ENOMEM;
dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3498,7 +3299,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
adapter->pd = EHEA_PD_ID;
- dev_set_drvdata(&dev->dev, adapter);
+ platform_set_drvdata(dev, adapter);
/* initialize adapter and ports */
@@ -3520,17 +3321,9 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
(unsigned long)adapter);
- ret = ibmebus_request_irq(adapter->neq->attr.ist1,
- ehea_interrupt_neq, IRQF_DISABLED,
- "ehea_neq", adapter);
- if (ret) {
- dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
- goto out_kill_eq;
- }
-
ret = ehea_create_device_sysfs(dev);
if (ret)
- goto out_free_irq;
+ goto out_kill_eq;
ret = ehea_setup_ports(adapter);
if (ret) {
@@ -3538,21 +3331,35 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
goto out_rem_dev_sysfs;
}
+ ret = ibmebus_request_irq(adapter->neq->attr.ist1,
+ ehea_interrupt_neq, 0,
+ "ehea_neq", adapter);
+ if (ret) {
+ dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
+ goto out_shutdown_ports;
+ }
+
+ /* Handle any events that might be pending. */
+ tasklet_hi_schedule(&adapter->neq_tasklet);
+
ret = 0;
goto out;
+out_shutdown_ports:
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
+ if (adapter->port[i]) {
+ ehea_shutdown_single_port(adapter->port[i]);
+ adapter->port[i] = NULL;
+ }
+
out_rem_dev_sysfs:
ehea_remove_device_sysfs(dev);
-out_free_irq:
- ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
-
out_kill_eq:
ehea_destroy_eq(adapter->neq);
out_free_ad:
list_del(&adapter->list);
- kfree(adapter);
out:
ehea_update_firmware_handles();
@@ -3560,9 +3367,9 @@ out:
return ret;
}
-static int __devexit ehea_remove(struct platform_device *dev)
+static int ehea_remove(struct platform_device *dev)
{
- struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
+ struct ehea_adapter *adapter = platform_get_drvdata(dev);
int i;
for (i = 0; i < EHEA_MAX_PORTS; i++)
@@ -3579,14 +3386,13 @@ static int __devexit ehea_remove(struct platform_device *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
list_del(&adapter->list);
- kfree(adapter);
ehea_update_firmware_handles();
return 0;
}
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
{
int i;
@@ -3698,7 +3504,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
ehea_show_capabilities, NULL);
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
{
int ret;