author     David S. Miller <davem@davemloft.net>  2010-12-26 22:37:05 -0800
committer  David S. Miller <davem@davemloft.net>  2010-12-26 22:37:05 -0800
commit     17f7f4d9fcce8f1b75b5f735569309dee7665968 (patch)
tree       14d7e49ca0053a0fcab3c33b5023bf3f90c5c08a /drivers/net
parent     041110a439e21cd40709ead4ffbfa8034619ad77 (diff)
parent     d7c1255a3a21e98bdc64df8ccf005a174d7e6289 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/ipv4/fib_frontend.c
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                          |   12
-rw-r--r--  drivers/net/Makefile                         |    1
-rw-r--r--  drivers/net/atl1c/atl1c_main.c               |   39
-rw-r--r--  drivers/net/epic100.c                        |    4
-rw-r--r--  drivers/net/hamachi.c                        |    4
-rw-r--r--  drivers/net/mlx4/fw.c                        |    4
-rw-r--r--  drivers/net/sundance.c                       |    4
-rw-r--r--  drivers/net/tile/Makefile                    |   10
-rw-r--r--  drivers/net/tile/tilepro.c                   | 2406
-rw-r--r--  drivers/net/usb/mcs7830.c                    |    2
-rw-r--r--  drivers/net/wan/x25_asy.c                    |    2
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c    |    1
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c   |    1
-rw-r--r--  drivers/net/xen-netfront.c                   |    4
-rw-r--r--  drivers/net/yellowfin.c                      |    4
15 files changed, 2460 insertions(+), 38 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a20693fcb32..89be23340ee 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2954,6 +2954,18 @@ source "drivers/s390/net/Kconfig"
source "drivers/net/caif/Kconfig"
+config TILE_NET
+ tristate "Tilera GBE/XGBE network driver support"
+ depends on TILE
+ default y
+ select CRC32
+ help
+ This is a standard Linux network device driver for the
+ on-chip Tilera Gigabit Ethernet and XAUI interfaces.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tile_net.
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 652fc6b9803..b90738d1399 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
obj-$(CONFIG_PCH_GBE) += pch_gbe/
+obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index e48ea956c51..a699bbf20eb 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -702,6 +702,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
adapter->wol = 0;
+ device_set_wakeup_enable(&pdev->dev, false);
adapter->link_speed = SPEED_0;
adapter->link_duplex = FULL_DUPLEX;
adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
@@ -2444,8 +2445,9 @@ static int atl1c_close(struct net_device *netdev)
return 0;
}
-static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1c_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
@@ -2454,7 +2456,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
u32 wol_ctrl_data = 0;
u16 mii_intr_status_data = 0;
u32 wufc = adapter->wol;
- int retval = 0;
atl1c_disable_l0s_l1(hw);
if (netif_running(netdev)) {
@@ -2462,9 +2463,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
atl1c_down(adapter);
}
netif_device_detach(netdev);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
if (wufc)
if (atl1c_phy_power_saving(hw) != 0)
@@ -2525,12 +2523,8 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
- /* pcie patch */
- device_set_wakeup_enable(&pdev->dev, 1);
-
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
GPHY_CTRL_EXT_RESET);
- pci_prepare_to_sleep(pdev);
} else {
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
@@ -2540,25 +2534,17 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
hw->phy_configured = false; /* re-init PHY when resume */
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
}
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int atl1c_resume(struct pci_dev *pdev)
+static int atl1c_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
@@ -2582,7 +2568,12 @@ static int atl1c_resume(struct pci_dev *pdev)
static void atl1c_shutdown(struct pci_dev *pdev)
{
- atl1c_suspend(pdev, PMSG_SUSPEND);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ atl1c_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
}
static const struct net_device_ops atl1c_netdev_ops = {
@@ -2886,16 +2877,16 @@ static struct pci_error_handlers atl1c_err_handler = {
.resume = atl1c_io_resume,
};
+static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
+
static struct pci_driver atl1c_driver = {
.name = atl1c_driver_name,
.id_table = atl1c_pci_tbl,
.probe = atl1c_probe,
.remove = __devexit_p(atl1c_remove),
- /* Power Managment Hooks */
- .suspend = atl1c_suspend,
- .resume = atl1c_resume,
.shutdown = atl1c_shutdown,
- .err_handler = &atl1c_err_handler
+ .err_handler = &atl1c_err_handler,
+ .driver.pm = &atl1c_pm_ops,
};
/*
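The atl1c hunks above are a straight conversion from the legacy PCI
.suspend/.resume hooks (which take a pm_message_t and must save and restore
PCI state by hand) to dev_pm_ops, where the PCI core performs
pci_save_state(), power-state selection, and wakeup arming around the driver
callbacks. A minimal sketch of the resulting shape, using a hypothetical
"foo" driver rather than atl1c's actual code:

	#include <linux/pci.h>

	static int foo_suspend(struct device *dev)
	{
		/* Quiesce the device only; the PCI core saves config
		 * space and picks the low-power state (contrast the
		 * pci_save_state()/pci_set_power_state() calls the
		 * patch deletes from atl1c_suspend()). */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* Config space is already restored when this runs. */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name      = "foo",
		.driver.pm = &foo_pm_ops,	/* replaces .suspend/.resume */
	};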
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index aa56963ad55..c353bf3113c 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -935,7 +935,7 @@ static void epic_init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
ep->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1233,7 +1233,7 @@ static int epic_rx(struct net_device *dev, int budget)
entry = ep->dirty_rx % RX_RING_SIZE;
if (ep->rx_skbuff[entry] == NULL) {
struct sk_buff *skb;
- skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
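This +2 pattern recurs in the hamachi and sundance hunks below: each driver
reserves 2 bytes with skb_reserve() so the IP header lands on a 16-byte
boundary, which previously shrank the usable buffer to rx_buf_sz - 2 even
though the NIC may write a full rx_buf_sz bytes. A minimal sketch of the
corrected allocation (hypothetical "priv", not any one driver's code):

	struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_sz + 2);
	if (skb) {
		skb_reserve(skb, 2);	/* align the IP header */
		/* skb still has rx_buf_sz bytes of room for the NIC */
	}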
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 9a6485892b3..80d25ed5334 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1202,7 +1202,7 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1669,7 +1669,7 @@ static int hamachi_rx(struct net_device *dev)
entry = hmp->dirty_rx % RX_RING_SIZE;
desc = &(hmp->rx_ring[entry]);
if (hmp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
hmp->rx_skbuff[entry] = skb;
if (skb == NULL)
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b68eee2414c..7a7e18ba278 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
+ mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+ field = 3;
+ }
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
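The new check guards against firmware advertising more BlueFlame registers
per page than can physically fit. As a worked example (the numbers are
illustrative, not from the patch): with PAGE_SIZE = 4096 and bf_reg_size =
512, at most 4096 / 512 = 8 registers fit in a page, so the log2 field may
not exceed 3; an invalid value is replaced with field = 3, giving
bf_regs_per_page = 1 << 3 = 8.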
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e5662962c7b..4793df843c2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1020,7 +1020,7 @@ static void init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1411,7 +1411,7 @@ static void refill_rx (struct net_device *dev)
struct sk_buff *skb;
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(np->rx_buf_sz);
+ skb = dev_alloc_skb(np->rx_buf_sz + 2);
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile
new file mode 100644
index 00000000000..f634f142cab
--- /dev/null
+++ b/drivers/net/tile/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the TILE on-chip networking support.
+#
+
+obj-$(CONFIG_TILE_NET) += tile_net.o
+ifdef CONFIG_TILEGX
+tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+else
+tile_net-objs := tilepro.o
+endif
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
new file mode 100644
index 00000000000..0e6bac5ec65
--- /dev/null
+++ b/drivers/net/tile/tilepro.c
@@ -0,0 +1,2406 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+
+#include <hv/drv_xgbe_intf.h>
+#include <hv/drv_xgbe_impl.h>
+#include <hv/hypervisor.h>
+#include <hv/netio_intf.h>
+
+/* For TSO */
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+
+/* There is no singlethread_cpu, so schedule work on the current cpu. */
+#define singlethread_cpu -1
+
+
+/*
+ * First, "tile_net_init_module()" initializes all four "devices" which
+ * can be used by linux.
+ *
+ * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
+ * the network cpus, then uses "tile_net_open_aux()" to initialize
+ * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
+ * the tiles, provide buffers to LIPP, allow ingress to start, and
+ * turn on hypervisor interrupt handling (and NAPI) on all tiles.
+ *
+ * If registration fails due to the link being down, then "retry_work"
+ * is used to keep calling "tile_net_open_inner()" until it succeeds.
+ *
+ * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
+ * stop egress, drain the LIPP buffers, unregister all the tiles, stop
+ * LIPP/LEPP, and wipe the LEPP queue.
+ *
+ * We start out with the ingress interrupt enabled on each CPU. When
+ * this interrupt fires, we disable it, and call "napi_schedule()".
+ * This will cause "tile_net_poll()" to be called, which will pull
+ * packets from the netio queue, filtering them out, or passing them
+ * to "netif_receive_skb()". If our budget is exhausted, we will
+ * return, knowing we will be called again later. Otherwise, we
+ * reenable the ingress interrupt, and call "napi_complete()".
+ *
+ *
+ * NOTE: The use of "native_driver" ensures that EPP exists, and that
+ * "epp_sendv" is legal, and that "LIPP" is being used.
+ *
+ * NOTE: Failing to free completions for an arbitrarily long time
+ * (which is defined to be illegal) does in fact cause bizarre
+ * problems. The "egress_timer" helps prevent this from happening.
+ *
+ * NOTE: The egress code can be interrupted by the interrupt handler.
+ */
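A condensed sketch of the NAPI contract the comment above describes
(hypothetical "foo" helpers, not this driver's actual code): the interrupt
handler disables the ingress interrupt and calls napi_schedule(); the poll
routine then consumes at most "budget" packets and re-enables the interrupt
only once it runs out of work.

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		int work = 0;

		while (work < budget && foo_ingress_pending())
			work += foo_process_packet(); /* netif_receive_skb() inside */

		if (work < budget) {
			napi_complete(napi);		/* done polling... */
			foo_enable_ingress_interrupt();	/* ...wait for next irq */
		}
		return work;	/* budget exhausted: poll again later */
	}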
+
+
+/* HACK: Allow use of "jumbo" packets. */
+/* This should be 1500 if "jumbo" is not set in LIPP. */
+/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
+/* ISSUE: This has not been thoroughly tested (except at 1500). */
+#define TILE_NET_MTU 1500
+
+/* HACK: Define to support GSO. */
+/* ISSUE: This may actually hurt performance of the TCP blaster. */
+/* #define TILE_NET_GSO */
+
+/* Define this to collapse "duplicate" acks. */
+/* #define IGNORE_DUP_ACKS */
+
+/* HACK: Define this to verify incoming packets. */
+/* #define TILE_NET_VERIFY_INGRESS */
+
+/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
+#define TILE_NET_TX_QUEUE_LEN 0
+
+/* Define to dump packets (prints out the whole packet on tx and rx). */
+/* #define TILE_NET_DUMP_PACKETS */
+
+/* Define to enable debug spew (all PDEBUG's are enabled). */
+/* #define TILE_NET_DEBUG */
+
+
+/* Define to activate paranoia checks. */
+/* #define TILE_NET_PARANOIA */
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* Default retry interval for bringing up the NetIO interface, in jiffies. */
+#define TILE_NET_RETRY_INTERVAL (5 * HZ)
+
+/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
+#define TILE_NET_DEVS 4
+
+
+
+/* Paranoia. */
+#if NET_IP_ALIGN != LIPP_PACKET_PADDING
+#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
+#endif
+
+
+/* Debug print. */
+#ifdef TILE_NET_DEBUG
+#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
+#else
+#define PDEBUG(fmt, args...)
+#endif
+
+
+MODULE_AUTHOR("Tilera");
+MODULE_LICENSE("GPL");
+
+
+#define IS_MULTICAST(mac_addr) \
+ (((u8 *)(mac_addr))[0] & 0x01)
+
+#define IS_BROADCAST(mac_addr) \
+ (((u16 *)(mac_addr))[0] == 0xffff)
+
+
+/*
+ * Queue of incoming packets for a specific cpu and device.
+ *
+ * Includes a pointer to the "system" data, and the actual "user" data.
+ */
+struct tile_netio_queue {
+ netio_queue_impl_t *__system_part;
+ netio_queue_user_impl_t __user_part;
+
+};
+
+
+/*
+ * Statistics counters for a specific cpu and device.
+ */
+struct tile_net_stats_t {
+ u32 rx_packets;
+ u32 rx_bytes;
+ u32 tx_packets;
+ u32 tx_bytes;
+};
+
+
+/*
+ * Info for a specific cpu and device.
+ *
+ * ISSUE: There is a "dev" pointer in "napi" as well.
+ */
+struct tile_net_cpu {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ struct tile_netio_queue queue;
+ /* Statistics. */
+ struct tile_net_stats_t stats;
+ /* ISSUE: Is this needed? */
+ bool napi_enabled;
+ /* True if this tile has successfully registered with the IPP. */
+ bool registered;
+ /* True if the link was down last time we tried to register. */
+ bool link_down;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct timer_list egress_timer;
+};
+
+
+/*
+ * Info for a specific device.
+ */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* The actual egress queue. */
+ lepp_queue_t *epp_queue;
+ /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
+ spinlock_t cmd_lock;
+ /* Protects "epp_queue->comp_head". */
+ spinlock_t comp_lock;
+ /* The hypervisor handle for this interface. */
+ int hv_devhdl;
+ /* The intr bit mask that IDs this device. */
+ u32 intr_id;
+ /* True iff "tile_net_open_aux()" has succeeded. */
+ int partly_opened;
+ /* True iff "tile_net_open_inner()" has succeeded. */
+ int fully_opened;
+ /* Effective network cpus. */
+ struct cpumask network_cpus_map;
+ /* Number of network cpus. */
+ int network_cpus_count;
+ /* Credits per network cpu. */
+ int network_cpus_credits;
+ /* Network stats. */
+ struct net_device_stats stats;
+ /* For NetIO bringup retries. */
+ struct delayed_work retry_work;
+ /* Quick access to per cpu data. */
+ struct tile_net_cpu *cpu[NR_CPUS];
+};
+
+
+/*
+ * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
+ */
+static struct net_device *tile_net_devs[TILE_NET_DEVS];
+
+/*
+ * The "tile_net_cpu" structures for each device.
+ */
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
+
+
+/*
+ * True if "network_cpus" was specified.
+ */
+static bool network_cpus_used;
+
+/*
+ * The actual cpus in "network_cpus".
+ */
+static struct cpumask network_cpus_map;
+
+
+
+#ifdef TILE_NET_DEBUG
+/*
+ * printk with extra stuff.
+ *
+ * We print the CPU we're running in brackets.
+ */
+static void net_printk(char *fmt, ...)
+{
+ int i;
+ int len;
+ va_list args;
+ static char buf[256];
+
+ len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
+ va_start(args, fmt);
+ i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
+ va_end(args);
+ buf[255] = '\0';
+ pr_notice("%s", buf);
+}
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+/*
+ * Dump a packet.
+ */
+static void dump_packet(unsigned char *data, unsigned long length, char *s)
+{
+ unsigned long i;
+ char buf[128]; /* line buffer for the hex dump below */
+ static unsigned int count;
+
+ pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
+ data, length, s, count++);
+
+ pr_info("\n");
+
+ for (i = 0; i < length; i++) {
+ if ((i & 0xf) == 0)
+ sprintf(buf, "%8.8lx:", i);
+ sprintf(buf + strlen(buf), " %2.2x", data[i]);
+ if ((i & 0xf) == 0xf || i == length - 1)
+ pr_info("%s\n", buf);
+ }
+}
+#endif
+
+
+/*
+ * Provide support for the __netio_fastio1() swint
+ * (see <hv/drv_xgbe_intf.h> for how it is used).
+ *
+ * The fastio swint2 call may clobber all the caller-saved registers.
+ * It rarely clobbers memory, but we allow for the possibility in
+ * the signature just to be on the safe side.
+ *
+ * Also, gcc doesn't seem to allow an input operand to be
+ * clobbered, so we fake it with dummy outputs.
+ *
+ * This function can't be static because of the way it is declared
+ * in the netio header.
+ */
+inline int __netio_fastio1(u32 fastio_index, u32 arg0)
+{
+ long result, clobber_r1, clobber_r10;
+ asm volatile("swint2"
+ : "=R00" (result),
+ "=R01" (clobber_r1), "=R10" (clobber_r10)
+ : "R10" (fastio_index), "R01" (arg0)
+ : "memory", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9",
+ "r11", "r12", "r13", "r14",
+ "r15", "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23", "r24",
+ "r25", "r26", "r27", "r28", "r29");
+ return result;
+}
+
+
+/*
+ * Provide a linux buffer to LIPP.
+ */
+static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
+ void *va, bool small)
+{
+ struct tile_netio_queue *queue = &info->queue;
+
+ /* Convert "va" and "small" to "linux_buffer_t". */
+ unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
+
+ __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
+}
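The "linux_buffer_t" built above packs a 38-bit client physical address into
32 bits: buffers are 128-byte aligned, so the low 7 address bits are always
zero and only pa >> 7 is stored, shifted left once to leave room for the
small/large flag in bit 0. A sketch of the round trip (the decode half
mirrors the ingress code later in this file):

	/* Sketch; assumes "va" is 128-byte aligned, as LIPP requires. */
	static unsigned int encode_linux_buffer(void *va, bool small)
	{
		return ((unsigned int)(__pa(va) >> 7) << 1) + small;
	}

	static void *decode_linux_buffer(unsigned int buffer, bool *small)
	{
		*small = ((buffer & 1) != 0);
		return __va((phys_addr_t)(buffer >> 1) << 7);
	}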
+
+
+/*
+ * Provide a linux buffer for LIPP.
+ */
+static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
+ bool small)
+{
+ /* ISSUE: What should we use here? */
+ unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
+
+ /* Round up to avoid "false sharing" with the last cache line. */
+ unsigned int buffer_size =
+ (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
+ CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
+
+ /*
+ * ISSUE: Since CPAs are 38 bits, and we can only encode the
+ * high 31 bits in a "linux_buffer_t", the low 7 bits must be
+ * zero, and thus, we must align the actual "va" mod 128.
+ */
+ const unsigned long align = 128;
+
+ struct sk_buff *skb;
+ void *va;
+
+ struct sk_buff **skb_ptr;
+
+ /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
+ /* and also "reserves" that many bytes. */
+ /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
+ int len = sizeof(*skb_ptr) + align + buffer_size;
+
+ while (1) {
+
+ /* Allocate (or fail). */
+ skb = dev_alloc_skb(len);
+ if (skb == NULL)
+ return false;
+
+ /* Make room for a back-pointer to 'skb'. */
+ skb_reserve(skb, sizeof(*skb_ptr));
+
+ /* Make sure we are aligned. */
+ skb_reserve(skb, -(long)skb->data & (align - 1));
+
+ /* This address is given to IPP. */
+ va = skb->data;
+
+ if (small)
+ break;
+
+ /* ISSUE: This has never been observed! */
+ /* Large buffers must not span a huge page. */
+ if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
+ break;
+ pr_err("Leaking unaligned linux buffer at %p.\n", va);
+ }
+
+ /* Skip two bytes to satisfy LIPP assumptions. */
+ /* Note that this aligns IP on a 16 byte boundary. */
+ /* ISSUE: Do this when the packet arrives? */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ /* Save a back-pointer to 'skb'. */
+ skb_ptr = va - sizeof(*skb_ptr);
+ *skb_ptr = skb;
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(skb->data, buffer_size);
+
+ /* Make sure "skb_ptr" has been flushed. */
+ __insn_mf();
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-coherent ingress buffer!");
+ }
+#endif
+#endif
+
+ /* Provide the new buffer. */
+ tile_net_provide_linux_buffer(info, va, small);
+
+ return true;
+}
+
+
+/*
+ * Provide linux buffers for LIPP.
+ */
+static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
+{
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Could not provide a linux buffer to LIPP.\n");
+}
+
+
+/*
+ * Grab some LEPP completions, and store them in "comps", of size
+ * "comps_size", and return the number of completions which were
+ * stored, so the caller can free them.
+ *
+ * If "pending" is not NULL, it will be set to true if there might
+ * still be some pending completions caused by this tile, else false.
+ */
+static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
+ struct sk_buff *comps[],
+ unsigned int comps_size,
+ bool *pending)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ unsigned int n = 0;
+
+ unsigned int comp_head;
+ unsigned int comp_busy;
+ unsigned int comp_tail;
+
+ spin_lock(&priv->comp_lock);
+
+ comp_head = eq->comp_head;
+ comp_busy = eq->comp_busy;
+ comp_tail = eq->comp_tail;
+
+ while (comp_head != comp_busy && n < comps_size) {
+ comps[n++] = eq->comps[comp_head];
+ LEPP_QINC(comp_head);
+ }
+
+ if (pending != NULL)
+ *pending = (comp_head != comp_tail);
+
+ eq->comp_head = comp_head;
+
+ spin_unlock(&priv->comp_lock);
+
+ return n;
+}
+
+
+/*
+ * Make sure the egress timer is scheduled.
+ *
+ * Note that we use "schedule if not scheduled" logic instead of the more
+ * obvious "reschedule" logic, because "reschedule" is fairly expensive.
+ */
+static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
+{
+ if (!info->egress_timer_scheduled) {
+ mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+
+/*
+ * The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected (on behalf of any tile).
+ *
+ * ISSUE: Realistically, will the timer ever stop scheduling itself?
+ *
+ * ISSUE: This timer is almost never actually needed, so just use a global
+ * timer that can run on any tile.
+ *
+ * ISSUE: Maybe instead track number of expected completions, and free
+ * only that many, resetting to zero if "pending" is ever false.
+ */
+static void tile_net_handle_egress_timer(unsigned long arg)
+{
+ struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
+ struct net_device *dev = info->napi.dev;
+
+ struct sk_buff *olds[32];
+ unsigned int wanted = 32;
+ unsigned int i, nolds = 0;
+ bool pending;
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
+
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* Reschedule timer if needed. */
+ if (pending)
+ tile_net_schedule_egress_timer(info);
+}
+
+
+#ifdef IGNORE_DUP_ACKS
+
+/*
+ * Help detect "duplicate" ACKs. These are sequential packets (for a
+ * given flow) which are exactly 66 bytes long, sharing everything but
+ * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
+ * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
+ * +N, and the Tstamps are usually identical.
+ *
+ * NOTE: Apparently truly duplicate acks (with identical "ack" values),
+ * should not be collapsed, as they are used for some kind of flow control.
+ */
+static bool is_dup_ack(char *s1, char *s2, unsigned int len)
+{
+ int i;
+
+ unsigned long long ignorable = 0;
+
+ /* Identification. */
+ ignorable |= (1ULL << 0x12);
+ ignorable |= (1ULL << 0x13);
+
+ /* Header checksum. */
+ ignorable |= (1ULL << 0x18);
+ ignorable |= (1ULL << 0x19);
+
+ /* ACK. */
+ ignorable |= (1ULL << 0x2a);
+ ignorable |= (1ULL << 0x2b);
+ ignorable |= (1ULL << 0x2c);
+ ignorable |= (1ULL << 0x2d);
+
+ /* WinSize. */
+ ignorable |= (1ULL << 0x30);
+ ignorable |= (1ULL << 0x31);
+
+ /* Checksum. */
+ ignorable |= (1ULL << 0x32);
+ ignorable |= (1ULL << 0x33);
+
+ for (i = 0; i < len; i++, ignorable >>= 1) {
+
+ if ((ignorable & 1) || (s1[i] == s2[i]))
+ continue;
+
+#ifdef TILE_NET_DEBUG
+ /* HACK: Mention non-timestamp diffs. */
+ if (i < 0x38 && i != 0x2f &&
+ net_ratelimit())
+ pr_info("Diff at 0x%x\n", i);
+#endif
+
+ return false;
+ }
+
+#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
+ /* HACK: Do not suppress truly duplicate ACKs. */
+ /* ISSUE: Is this actually necessary or helpful? */
+ if (s1[0x2a] == s2[0x2a] &&
+ s1[0x2b] == s2[0x2b] &&
+ s1[0x2c] == s2[0x2c] &&
+ s1[0x2d] == s2[0x2d]) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+#endif
+
+
+
+/*
+ * Like "tile_net_handle_packets()", but just discard packets.
+ */
+static void tile_net_discard_packets(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ while (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write) {
+
+ int index = qup->__packet_receive_read;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)
+ ((unsigned long) &qsp[1] + index);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+ }
+}
+
+
+/*
+ * Handle the next packet. Return true if "processed", false if "filtered".
+ */
+static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
+{
+ struct net_device *dev = info->napi.dev;
+
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+ struct tile_net_stats_t *stats = &info->stats;
+
+ int filter;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+
+ /* Extract the packet size. */
+ unsigned long len =
+ (NETIO_PKT_CUSTOM_LENGTH(pkt) +
+ NET_IP_ALIGN - NETIO_PACKET_PADDING);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Extract "small" (vs "large"). */
+ bool small = ((buffer & 1) != 0);
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Extract the packet data pointer. */
+ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
+ unsigned char *buf = va + NET_IP_ALIGN;
+
+#ifdef IGNORE_DUP_ACKS
+
+ static int other;
+ static int final;