author     Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 12:49:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 12:49:40 -0800
commit     0191b625ca5a46206d2fb862bb08f36f2fcb3b31 (patch)
tree       454d1842b1833d976da62abcbd5c47521ebe9bd7 /drivers/net/wan
parent     54a696bd07c14d3b1192d03ce7269bc59b45209a (diff)
parent     eb56092fc168bf5af199d47af50c0d84a96db898 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1429 commits)
  net: Allow dependancies of FDDI & Tokenring to be modular.
  igb: Fix build warning when DCA is disabled.
  net: Fix warning fallout from recent NAPI interface changes.
  gro: Fix potential use after free
  sfc: If AN is enabled, always read speed/duplex from the AN advertising bits
  sfc: When disabling the NIC, close the device rather than unregistering it
  sfc: SFT9001: Add cable diagnostics
  sfc: Add support for multiple PHY self-tests
  sfc: Merge top-level functions for self-tests
  sfc: Clean up PHY mode management in loopback self-test
  sfc: Fix unreliable link detection in some loopback modes
  sfc: Generate unique names for per-NIC workqueues
  802.3ad: use standard ethhdr instead of ad_header
  802.3ad: generalize out mac address initializer
  802.3ad: initialize ports LACPDU from const initializer
  802.3ad: remove typedef around ad_system
  802.3ad: turn ports is_individual into a bool
  802.3ad: turn ports is_enabled into a bool
  802.3ad: make ntt bool
  ixgbe: Fix set_ringparam in ixgbe to use the same memory pools.
  ...

Fixed trivial IPv4/6 address printing conflicts in fs/cifs/connect.c due to
the conversion to %pI (in this networking merge) and the addition of doing
IPv6 addresses (from the earlier merge of CIFS).
Diffstat (limited to 'drivers/net/wan')
-rw-r--r--  drivers/net/wan/Kconfig | 9
-rw-r--r--  drivers/net/wan/Makefile | 3
-rw-r--r--  drivers/net/wan/c101.c | 6
-rw-r--r--  drivers/net/wan/cosa.c | 1
-rw-r--r--  drivers/net/wan/cycx_x25.c | 91
-rw-r--r--  drivers/net/wan/dlci.c | 37
-rw-r--r--  drivers/net/wan/dscc4.c | 4
-rw-r--r--  drivers/net/wan/farsync.c | 2
-rw-r--r--  drivers/net/wan/hd64570.c (renamed from drivers/net/wan/hd6457x.c) | 255
-rw-r--r--  drivers/net/wan/hd64572.c | 640
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 10
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 649
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 1
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 1325
-rw-r--r--  drivers/net/wan/lapbether.c | 3
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 1
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 1
-rw-r--r--  drivers/net/wan/n2.c | 9
-rw-r--r--  drivers/net/wan/pc300_drv.c | 22
-rw-r--r--  drivers/net/wan/pc300too.c | 121
-rw-r--r--  drivers/net/wan/pci200syn.c | 79
-rw-r--r--  drivers/net/wan/sbni.c | 101
-rw-r--r--  drivers/net/wan/sdla.c | 48
-rw-r--r--  drivers/net/wan/sealevel.c | 1
-rw-r--r--  drivers/net/wan/syncppp.c | 1480
-rw-r--r--  drivers/net/wan/wanxl.c | 9
-rw-r--r--  drivers/net/wan/x25_asy.c | 52
-rw-r--r--  drivers/net/wan/z85230.c | 12
28 files changed, 2897 insertions, 2075 deletions
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 21efd99b929..d08ce6a264c 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -207,6 +207,8 @@ config PC300
tristate "Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)"
depends on HDLC && PCI && BROKEN
---help---
+ This driver is broken because of struct tty_driver change.
+
Driver for the Cyclades-PC300 synchronous communication boards.
These boards provide synchronous serial interfaces to your
@@ -333,6 +335,13 @@ config DSCC4_PCI_RST
Say Y if your card supports this feature.
+config IXP4XX_HSS
+ tristate "Intel IXP4xx HSS (synchronous serial port) support"
+ depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ help
+ Say Y here if you want to use built-in HSS ports
+ on IXP4xx processor.
+
config DLCI
tristate "Frame Relay DLCI support"
---help---
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 102549605d0..19d14bc2835 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o
obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
obj-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o
obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
-obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o syncppp.o
+obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
pc300-y := pc300_drv.o
@@ -41,6 +41,7 @@ obj-$(CONFIG_C101) += c101.o
obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
+obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c8e563106a4..b46897996f7 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -88,7 +88,7 @@ static card_t **new_card = &first_card;
/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
#define sca_outw(value, reg, card) do { \
writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
- writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\
+ writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
} while(0)
#define port_to_card(port) (port)
@@ -113,7 +113,7 @@ static inline void openwin(card_t *card, u8 page)
}
-#include "hd6457x.c"
+#include "hd64570.c"
static inline void set_carrier(port_t *port)
@@ -381,7 +381,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
return result;
}
- sca_init_sync_port(card); /* Set up C101 memory */
+ sca_init_port(card); /* Set up C101 memory */
set_carrier(card);
printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
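The c101.c hunks above illustrate how this driver family reuses the SCA code: the card file defines its own register accessors and helpers, then textually includes the (now renamed) common file. A minimal sketch of that pattern, assuming a hypothetical my_card.c with a made-up MY_SCA_BASE offset and the same low-byte-first 16-bit write as the C101 macro above (other accessors omitted):

#include <asm/io.h>

/* my_card.c (hypothetical): card_t/port_t and win0base are defined by the
 * including driver; hd64570.c only relies on these accessor macros. */
#define sca_in(reg, card)          readb((card)->win0base + MY_SCA_BASE + (reg))
#define sca_out(value, reg, card)  writeb(value, (card)->win0base + MY_SCA_BASE + (reg))
#define sca_outw(value, reg, card) do { \
        writeb((value) & 0xFF, (card)->win0base + MY_SCA_BASE + (reg)); \
        writeb(((value) >> 8) & 0xFF, (card)->win0base + MY_SCA_BASE + (reg) + 1); \
} while (0)
#define port_to_card(port) (port)       /* one port per card, as in c101.c */

#include "hd64570.c"    /* pulls in sca_init_port(), sca_open(), sca_xmit(), ... */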
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7f97f8d08c3..d80b72e22de 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -754,7 +754,6 @@ static int cosa_net_rx_done(struct channel_data *chan)
chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
netif_rx(chan->rx_skb);
chan->rx_skb = NULL;
- chan->netdev->last_rx = jiffies;
return 0;
}
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 5a7303dc096..5fa52923efa 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -199,6 +199,8 @@ static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
static struct net_device *
cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
+static void cycx_x25_chan_setup(struct net_device *dev);
+
#ifdef CYCLOMX_X25_DEBUG
static void hex_dump(char *msg, unsigned char *p, int len);
static void cycx_x25_dump_config(struct cycx_x25_config *conf);
@@ -353,6 +355,12 @@ static int cycx_wan_update(struct wan_device *wandev)
return 0;
}
+/* callback to initialize device */
+static void cycx_x25_chan_setup(struct net_device *dev)
+{
+ dev->init = cycx_netdevice_init;
+}
+
/* Create new logical channel.
* This routine is called by the router when ROUTER_IFNEW IOCTL is being
* handled.
@@ -376,11 +384,12 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
return -EINVAL;
}
- /* allocate and initialize private data */
- chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
- if (!chan)
+ dev = alloc_netdev(sizeof(struct cycx_x25_channel), conf->name,
+ cycx_x25_chan_setup);
+ if (!dev)
return -ENOMEM;
+ chan = netdev_priv(dev);
strcpy(chan->name, conf->name);
chan->card = card;
chan->link = conf->port;
@@ -396,14 +405,14 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
if (len > WAN_ADDRESS_SZ) {
printk(KERN_ERR "%s: %s local addr too long!\n",
wandev->name, chan->name);
- kfree(chan);
- return -EINVAL;
+ err = -EINVAL;
+ goto error;
} else {
chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
if (!chan->local_addr) {
- kfree(chan);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto error;
}
}
@@ -429,41 +438,31 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
"%s: PVC %u is out of range on interface %s!\n",
wandev->name, lcn, chan->name);
err = -EINVAL;
+ goto error;
}
} else {
printk(KERN_ERR "%s: invalid media address on interface %s!\n",
wandev->name, chan->name);
err = -EINVAL;
+ goto error;
}
- if (err) {
- kfree(chan->local_addr);
- kfree(chan);
- return err;
- }
-
- /* prepare network device data space for registration */
- strcpy(dev->name, chan->name);
- dev->init = cycx_netdevice_init;
- dev->priv = chan;
-
return 0;
+
+error:
+ free_netdev(dev);
+ return err;
}
/* Delete logical channel. */
static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
{
- if (dev->priv) {
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
- if (chan->svc) {
- kfree(chan->local_addr);
- if (chan->state == WAN_CONNECTED)
- del_timer(&chan->timer);
- }
-
- kfree(chan);
- dev->priv = NULL;
+ if (chan->svc) {
+ kfree(chan->local_addr);
+ if (chan->state == WAN_CONNECTED)
+ del_timer(&chan->timer);
}
return 0;
@@ -484,7 +483,7 @@ static const struct header_ops cycx_header_ops = {
* registration. */
static int cycx_netdevice_init(struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
struct cycx_device *card = chan->card;
struct wan_device *wandev = &card->wandev;
@@ -542,7 +541,7 @@ static int cycx_netdevice_open(struct net_device *dev)
* o if there's no more open channels then disconnect physical link. */
static int cycx_netdevice_stop(struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
netif_stop_queue(dev);
@@ -596,7 +595,7 @@ static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
struct cycx_device *card = chan->card;
if (!chan->svc)
@@ -670,7 +669,7 @@ free_packet:
* Return a pointer to struct net_device_stats */
static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
return chan ? &chan->ifstats : NULL;
}
@@ -783,7 +782,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
return;
}
- chan = dev->priv;
+ chan = netdev_priv(dev);
reset_timer(dev);
if (chan->drop_sequence) {
@@ -843,7 +842,6 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
skb_reset_mac_header(skb);
netif_rx(skb);
- dev->last_rx = jiffies; /* timestamp */
}
/* Connect interrupt handler. */
@@ -884,7 +882,7 @@ static void cycx_x25_irq_connect(struct cycx_device *card,
return;
}
- chan = dev->priv;
+ chan = netdev_priv(dev);
chan->lcn = lcn;
cycx_x25_connect_response(card, chan);
cycx_x25_set_chan_state(dev, WAN_CONNECTED);
@@ -914,7 +912,7 @@ static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
}
clear_bit(--key, (void*)&card->u.x.connection_keys);
- chan = dev->priv;
+ chan = netdev_priv(dev);
chan->lcn = lcn;
cycx_x25_set_chan_state(dev, WAN_CONNECTED);
}
@@ -954,7 +952,7 @@ static void cycx_x25_irq_disconnect(struct cycx_device *card,
dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
if (dev) {
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
cycx_x25_disconnect_response(card, chan->link, lcn);
cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
@@ -1302,7 +1300,7 @@ static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
struct cycx_x25_channel *chan;
while (dev) {
- chan = (struct cycx_x25_channel*)dev->priv;
+ chan = netdev_priv(dev);
if (chan->lcn == lcn)
break;
@@ -1319,7 +1317,7 @@ static struct net_device *
struct cycx_x25_channel *chan;
while (dev) {
- chan = (struct cycx_x25_channel*)dev->priv;
+ chan = netdev_priv(dev);
if (!strcmp(chan->addr, dte))
break;
@@ -1337,7 +1335,7 @@ static struct net_device *
* <0 failure */
static int cycx_x25_chan_connect(struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
struct cycx_device *card = chan->card;
if (chan->svc) {
@@ -1362,7 +1360,7 @@ static int cycx_x25_chan_connect(struct net_device *dev)
* o if SVC then clear X.25 call */
static void cycx_x25_chan_disconnect(struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
if (chan->svc) {
x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
@@ -1375,7 +1373,7 @@ static void cycx_x25_chan_disconnect(struct net_device *dev)
static void cycx_x25_chan_timer(unsigned long d)
{
struct net_device *dev = (struct net_device *)d;
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
if (chan->state == WAN_CONNECTED)
cycx_x25_chan_disconnect(dev);
@@ -1387,7 +1385,7 @@ static void cycx_x25_chan_timer(unsigned long d)
/* Set logical channel state. */
static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
struct cycx_device *card = chan->card;
unsigned long flags;
char *string_state = NULL;
@@ -1453,7 +1451,7 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
* to the router. */
static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
struct cycx_device *card = chan->card;
int bitm = 0; /* final packet */
unsigned len = skb->len;
@@ -1494,7 +1492,6 @@ static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies; /* timestamp */
}
/* Convert line speed in bps to a number used by cyclom 2x code. */
@@ -1547,7 +1544,7 @@ static unsigned dec_to_uint(u8 *str, int len)
static void reset_timer(struct net_device *dev)
{
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
if (chan->svc)
mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
@@ -1600,7 +1597,7 @@ static void cycx_x25_dump_devs(struct wan_device *wandev)
printk(KERN_INFO "---------------------------------------\n");
while(dev) {
- struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_x25_channel *chan = netdev_priv(dev);
printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n",
chan->name, chan->addr, netif_queue_stopped(dev),
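Most of the cycx_x25.c changes follow a single conversion: per-channel state moves from a separately kmalloc'ed structure hung off dev->priv into the net_device allocation itself, reached via netdev_priv(). A minimal sketch of that idiom with hypothetical names (struct my_chan, my_setup, my_new_if), using the three-argument alloc_netdev() of this kernel version as in the hunk above:

#include <linux/netdevice.h>

struct my_chan {
        int lcn;                        /* hypothetical per-channel state */
};

static void my_setup(struct net_device *dev)
{
        /* per-device defaults; cycx sets dev->init here (see hunk above) */
}

static struct net_device *my_new_if(const char *name)
{
        struct net_device *dev;
        struct my_chan *chan;

        /* priv area is allocated together with the net_device */
        dev = alloc_netdev(sizeof(struct my_chan), name, my_setup);
        if (!dev)
                return NULL;

        chan = netdev_priv(dev);        /* points into the same allocation */
        chan->lcn = -1;
        return dev;                     /* error paths call free_netdev(dev) */
}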
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index b14242768fa..a297e3efa05 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -74,7 +74,7 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
unsigned int hlen;
char *dest;
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
hdr.control = FRAD_I_UI;
switch(type)
@@ -110,7 +110,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
struct frhdr *hdr;
int process, header;
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
printk(KERN_NOTICE "%s: invalid data no header\n",
dev->name);
@@ -181,7 +181,6 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
dlp->stats.rx_bytes += skb->len;
netif_rx(skb);
dlp->stats.rx_packets++;
- dev->last_rx = jiffies;
}
else
dev_kfree_skb(skb);
@@ -197,7 +196,7 @@ static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
if (!skb || !dev)
return(0);
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
netif_stop_queue(dev);
@@ -235,9 +234,9 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
struct frad_local *flp;
int err;
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
- flp = dlp->slave->priv;
+ flp = netdev_priv(dlp->slave);
if (!get)
{
@@ -269,7 +268,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return(-EPERM);
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
switch(cmd)
{
@@ -298,7 +297,7 @@ static int dlci_change_mtu(struct net_device *dev, int new_mtu)
{
struct dlci_local *dlp;
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
return((*dlp->slave->change_mtu)(dlp->slave, new_mtu));
}
@@ -309,7 +308,7 @@ static int dlci_open(struct net_device *dev)
struct frad_local *flp;
int err;
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
if (!*(short *)(dev->dev_addr))
return(-EINVAL);
@@ -317,7 +316,7 @@ static int dlci_open(struct net_device *dev)
if (!netif_running(dlp->slave))
return(-ENOTCONN);
- flp = dlp->slave->priv;
+ flp = netdev_priv(dlp->slave);
err = (*flp->activate)(dlp->slave, dev);
if (err)
return(err);
@@ -335,9 +334,9 @@ static int dlci_close(struct net_device *dev)
netif_stop_queue(dev);
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
- flp = dlp->slave->priv;
+ flp = netdev_priv(dlp->slave);
err = (*flp->deactivate)(dlp->slave, dev);
return 0;
@@ -347,7 +346,7 @@ static struct net_device_stats *dlci_get_stats(struct net_device *dev)
{
struct dlci_local *dlp;
- dlp = dev->priv;
+ dlp = netdev_priv(dev);
return(&dlp->stats);
}
@@ -365,7 +364,7 @@ static int dlci_add(struct dlci_add *dlci)
if (!slave)
return -ENODEV;
- if (slave->type != ARPHRD_FRAD || slave->priv == NULL)
+ if (slave->type != ARPHRD_FRAD || netdev_priv(slave) == NULL)
goto err1;
/* create device name */
@@ -391,11 +390,11 @@ static int dlci_add(struct dlci_add *dlci)
*(short *)(master->dev_addr) = dlci->dlci;
- dlp = (struct dlci_local *) master->priv;
+ dlp = netdev_priv(master);
dlp->slave = slave;
dlp->master = master;
- flp = slave->priv;
+ flp = netdev_priv(slave);
err = (*flp->assoc)(slave, master);
if (err < 0)
goto err2;
@@ -435,9 +434,9 @@ static int dlci_del(struct dlci_add *dlci)
return(-EBUSY);
}
- dlp = master->priv;
+ dlp = netdev_priv(master);
slave = dlp->slave;
- flp = slave->priv;
+ flp = netdev_priv(slave);
rtnl_lock();
err = (*flp->deassoc)(slave, master);
@@ -491,7 +490,7 @@ static const struct header_ops dlci_header_ops = {
static void dlci_setup(struct net_device *dev)
{
- struct dlci_local *dlp = dev->priv;
+ struct dlci_local *dlp = netdev_priv(dev);
dev->flags = 0;
dev->open = dlci_open;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 5f1ccb2b08b..888025db2f0 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -659,7 +659,6 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
skb_put(skb, pkt_len);
if (netif_running(dev))
skb->protocol = hdlc_type_trans(skb, dev);
- skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
if (skb->data[pkt_len] & FrameRdo)
@@ -730,8 +729,7 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
goto err_free_mmio_region_1;
}
- ioaddr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ ioaddr = pci_ioremap_bar(pdev, 0);
if (!ioaddr) {
printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
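The dscc4 hunk above replaces an open-coded mapping of BAR 0 with the pci_ioremap_bar() helper. A short sketch of the equivalence, assuming BAR 0 is a memory BAR (the helper rejects I/O BARs and returns NULL on failure):

#include <linux/pci.h>

static void __iomem *map_regs(struct pci_dev *pdev)
{
        /* roughly ioremap(pci_resource_start(pdev, 0),
         *                 pci_resource_len(pdev, 0)),
         * with the start/length lookup and a BAR-type check
         * folded into the helper */
        return pci_ioremap_bar(pdev, 0);
}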
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9557ad078ab..48a2c9d2895 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -896,7 +896,6 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
- dev->last_rx = jiffies;
}
/*
@@ -1322,7 +1321,6 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
- dev->last_rx = jiffies;
} else {
card->dma_skb_rx = skb;
card->dma_port_rx = port;
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd64570.c
index 591fb45a7c6..223238de475 100644
--- a/drivers/net/wan/hd6457x.c
+++ b/drivers/net/wan/hd64570.c
@@ -1,5 +1,5 @@
/*
- * Hitachi SCA HD64570 and HD64572 common driver for Linux
+ * Hitachi SCA HD64570 driver for Linux
*
* Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
*
@@ -7,9 +7,7 @@
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
- * Sources of information:
- * Hitachi HD64570 SCA User's Manual
- * Hitachi HD64572 SCA-II User's Manual
+ * Source of information: Hitachi HD64570 SCA User's Manual
*
* We use the following SCA memory map:
*
@@ -26,33 +24,26 @@
* tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used)
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
#include <linux/fcntl.h>
-#include <linux/interrupt.h>
+#include <linux/hdlc.h>
#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
-
-#include <linux/hdlc.h>
-
-#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
- (defined (__HD64570_H) && defined (__HD64572_H))
-#error Either hd64570.h or hd64572.h must be included
-#endif
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include "hd64570.h"
#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
@@ -62,16 +53,6 @@
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
-#ifdef __HD64570_H /* HD64570 */
-#define sca_outa(value, reg, card) sca_outw(value, reg, card)
-#define sca_ina(reg, card) sca_inw(reg, card)
-#define writea(value, ptr) writew(value, ptr)
-
-#else /* HD64572 */
-#define sca_outa(value, reg, card) sca_outl(value, reg, card)
-#define sca_ina(reg, card) sca_inl(reg, card)
-#define writea(value, ptr) writel(value, ptr)
-#endif
static inline struct net_device *port_to_dev(port_t *port)
{
@@ -81,8 +62,6 @@ static inline struct net_device *port_to_dev(port_t *port)
static inline int sca_intr_status(card_t *card)
{
u8 result = 0;
-
-#ifdef __HD64570_H /* HD64570 */
u8 isr0 = sca_in(ISR0, card);
u8 isr1 = sca_in(ISR1, card);
@@ -93,18 +72,6 @@ static inline int sca_intr_status(card_t *card)
if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
-#else /* HD64572 */
- u32 isr0 = sca_inl(ISR0, card);
-
- if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
- if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
- if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
- if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
- if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
- if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
-
-#endif /* HD64570 vs HD64572 */
-
if (!(result & SCA_INTR_DMAC_TX(0)))
if (sca_in(DSR_TX(0), card) & DSR_EOM)
result |= SCA_INTR_DMAC_TX(0);
@@ -127,7 +94,6 @@ static inline u16 next_desc(port_t *port, u16 desc, int transmit)
}
-
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
@@ -139,28 +105,26 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
}
-
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
- /* Descriptor offset always fits in 16 bytes */
+ /* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
-
-static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, int transmit)
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
+ int transmit)
{
#ifdef PAGE0_ALWAYS_MAPPED
return (pkt_desc __iomem *)(win0base(port_to_card(port))
- + desc_offset(port, desc, transmit));
+ + desc_offset(port, desc, transmit));
#else
return (pkt_desc __iomem *)(winbase(port_to_card(port))
- + desc_offset(port, desc, transmit));
+ + desc_offset(port, desc, transmit));
#endif
}
-
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
return port_to_card(port)->buff_offset +
@@ -186,7 +150,7 @@ static inline void sca_set_carrier(port_t *port)
}
-static void sca_init_sync_port(port_t *port)
+static void sca_init_port(port_t *port)
{
card_t *card = port_to_card(port);
int transmit, i;
@@ -195,7 +159,7 @@ static void sca_init_sync_port(port_t *port)
port->txin = 0;
port->txlast = 0;
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0);
#endif
@@ -209,7 +173,7 @@ static void sca_init_sync_port(port_t *port)
u16 chain_off = desc_offset(port, i + 1, transmit);
u32 buff_off = buffer_offset(port, i, transmit);
- writea(chain_off, &desc->cp);
+ writew(chain_off, &desc->cp);
writel(buff_off, &desc->bp);
writew(0, &desc->len);
writeb(0, &desc->stat);
@@ -222,16 +186,14 @@ static void sca_init_sync_port(port_t *port)
sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
DCR_RX(phy_node(port)), card);
-#ifdef __HD64570_H
- sca_out(0, dmac + CPB, card); /* pointer base */
-#endif
/* current desc addr */
- sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
+ sca_out(0, dmac + CPB, card); /* pointer base */
+ sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
if (!transmit)
- sca_outa(desc_offset(port, buffs - 1, transmit),
+ sca_outw(desc_offset(port, buffs - 1, transmit),
dmac + EDAL, card);
else
- sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
+ sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
card);
/* clear frame end interrupt counter */
@@ -258,7 +220,6 @@ static void sca_init_sync_port(port_t *port)
}
-
#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
@@ -282,17 +243,15 @@ static inline void sca_msci_intr(port_t *port)
#endif
-
-static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
+ u16 rxin)
{
struct net_device *dev = port_to_dev(port);
struct sk_buff *skb;
u16 len;
u32 buff;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
u32 maxlen;
u8 page;
-#endif
len = readw(&desc->len);
skb = dev_alloc_skb(len);
@@ -302,7 +261,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
}
buff = buffer_offset(port, rxin, 0);
-#ifndef ALL_PAGES_ALWAYS_MAPPED
page = buff / winsize(card);
buff = buff % winsize(card);
maxlen = winsize(card) - buff;
@@ -314,12 +272,10 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
openwin(card, page + 1);
memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
} else
-#endif
- memcpy_fromio(skb->data, winbase(card) + buff, len);
+ memcpy_fromio(skb->data, winbase(card) + buff, len);
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
- /* select pkt_desc table page back */
- openwin(card, 0);
+#ifndef PAGE0_ALWAYS_MAPPED
+ openwin(card, 0); /* select pkt_desc table page back */
#endif
skb_put(skb, len);
#ifdef DEBUG_PKT
@@ -328,13 +284,11 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
#endif
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- dev->last_rx = jiffies;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
}
-
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
@@ -354,7 +308,7 @@ static inline void sca_rx_intr(port_t *port)
while (1) {
u32 desc_off = desc_offset(port, port->rxin, 0);
pkt_desc __iomem *desc;
- u32 cda = sca_ina(dmac + CDAL, card);
+ u32 cda = sca_inw(dmac + CDAL, card);
if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
break; /* No frame received */
@@ -378,7 +332,7 @@ static inline void sca_rx_intr(port_t *port)
sca_rx(card, port, desc, port->rxin);
/* Set new error descriptor address */
- sca_outa(desc_off, dmac + EDAL, card);
+ sca_outw(desc_off, dmac + EDAL, card);
port->rxin = next_desc(port, port->rxin, 0);
}
@@ -387,7 +341,6 @@ static inline void sca_rx_intr(port_t *port)
}
-
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
@@ -408,7 +361,7 @@ static inline void sca_tx_intr(port_t *port)
pkt_desc __iomem *desc;
u32 desc_off = desc_offset(port, port->txlast, 1);
- u32 cda = sca_ina(dmac + CDAL, card);
+ u32 cda = sca_inw(dmac + CDAL, card);
if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
break; /* Transmitter is/will_be sending this frame */
@@ -424,17 +377,13 @@ static inline void sca_tx_intr(port_t *port)
}
-
static irqreturn_t sca_intr(int irq, void* dev_id)
{
card_t *card = dev_id;
int i;
u8 stat;
int handled = 0;
-
-#ifndef ALL_PAGES_ALWAYS_MAPPED
u8 page = sca_get_page(card);
-#endif
while((stat = sca_intr_status(card)) != 0) {
handled = 1;
@@ -453,14 +402,11 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
}
}
-#ifndef ALL_PAGES_ALWAYS_MAPPED
openwin(card, page); /* Restore original page */
-#endif
return IRQ_RETVAL(handled);
}
-
static void sca_set_port(port_t *port)
{
card_t* card = port_to_card(port);
@@ -498,12 +444,7 @@ static void sca_set_port(port_t *port)
port->tmc = tmc;
/* baud divisor - time constant*/
-#ifdef __HD64570_H
sca_out(port->tmc, msci + TMC, card);
-#else
- sca_out(port->tmc, msci + TMCR, card);
- sca_out(port->tmc, msci + TMCT, card);
-#endif
/* Set BRG bits */
sca_out(port->rxs, msci + RXS, card);
@@ -519,7 +460,6 @@ static void sca_set_port(port_t *port)
}
-
static void sca_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -541,11 +481,7 @@ static void sca_open(struct net_device *dev)
switch(port->parity) {
case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
-#ifdef __HD64570_H
case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
-#else
- case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
-#endif
case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
default: md0 = MD0_HDLC | MD0_CRC_NONE;
}
@@ -555,35 +491,20 @@ static void sca_open(struct net_device *dev)
sca_out(0x00, msci + MD1, card); /* no address field check */
sca_out(md2, msci + MD2, card);
sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
-#ifdef __HD64570_H
sca_out(CTL_IDLE, msci + CTL, card);
-#else
- /* Skip the rest of underrun frame */
- sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
-#endif
-#ifdef __HD64570_H
/* Allow at least 8 bytes before requesting RX DMA operation */
/* TX with higher priority and possibly with shorter transfers */
sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
-#else
- sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
- sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
- sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
- sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
- sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
-#endif
/* We're using the following interrupts:
- TXINT (DMAC completed all transmisions, underrun or DCD change)
- all DMA interrupts
*/
-
sca_set_carrier(port);
-#ifdef __HD64570_H
/* MSCI TX INT and RX INT A IRQ enable */
sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
@@ -592,21 +513,8 @@ static void sca_open(struct net_device *dev)
/* enable DMA IRQ */
sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
IER1, card);
-#else
- /* MSCI TXINT and RXINTA interrupt enable */
- sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
- card);
- /* DMA & MSCI IRQ enable */
- sca_outl(sca_inl(IER0, card) |
- (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
-#endif
-#ifdef __HD64570_H
sca_out(port->tmc, msci + TMC, card); /* Restore registers */
-#else
- sca_out(port->tmc, msci + TMCR, card);
- sca_out(port->tmc, msci + TMCT, card);
-#endif
sca_out(port->rxs, msci + RXS, card);
sca_out(port->txs, msci + TXS, card);
sca_out(CMD_TX_ENABLE, msci + CMD, card);
@@ -616,7 +524,6 @@ static void sca_open(struct net_device *dev)
}
-
static void sca_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -624,23 +531,17 @@ static void sca_close(struct net_device *dev)
/* reset channel */
sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
-#ifdef __HD64570_H
/* disable MSCI interrupts */
sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
IER0, card);
/* disable DMA interrupts */
sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
IER1, card);
-#else
- /* disable DMA & MSCI IRQ */
- sca_outl(sca_inl(IER0, card) &
- (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
-#endif
+
netif_stop_queue(dev);
}
-
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -654,11 +555,7 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
if (parity != PARITY_NONE &&
parity != PARITY_CRC16_PR0 &&
parity != PARITY_CRC16_PR1 &&
-#ifdef __HD64570_H
parity != PARITY_CRC16_PR0_CCITT &&
-#else
- parity != PARITY_CRC32_PR1_CCITT &&
-#endif
parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
@@ -668,34 +565,30 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
}
-
#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
u16 cnt;
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
- u8 page;
-#endif
+#ifndef PAGE0_ALWAYS_MAPPED
+ u8 page = sca_get_page(card);
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
- page = sca_get_page(card);
openwin(card, 0);
#endif
printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
- sca_ina(get_dmac_rx(port) + CDAL, card),
- sca_ina(get_dmac_rx(port) + EDAL, card),
+ sca_inw(get_dmac_rx(port) + CDAL, card),
+ sca_inw(get_dmac_rx(port) + EDAL, card),
sca_in(DSR_RX(phy_node(port)), card), port->rxin,
- sca_in(DSR_RX(phy_node(port)), card) & DSR_DE?"":"in");
+ sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
- sca_ina(get_dmac_tx(port) + CDAL, card),
- sca_ina(get_dmac_tx(port) + EDAL, card),
+ sca_inw(get_dmac_tx(port) + CDAL, card),
+ sca_inw(get_dmac_tx(port) + EDAL, card),
sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
@@ -703,12 +596,8 @@ static void sca_dump_rings(struct net_device *dev)
printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
printk("\n");
- printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, "
- "ST: %02x %02x %02x %02x"
-#ifdef __HD64572_H
- " %02x"
-#endif
- ", FST: %02x CST: %02x %02x\n",
+ printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
+ " FST: %02x CST: %02x %02x\n",
sca_in(get_msci(port) + MD0, card),
sca_in(get_msci(port) + MD1, card),
sca_in(get_msci(port) + MD2, card),
@@ -716,52 +605,33 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(get_msci(port) + ST1, card),
sca_in(get_msci(port) + ST2, card),
sca_in(get_msci(port) + ST3, card),
-#ifdef __HD64572_H
- sca_in(get_msci(port) + ST4, card),
-#endif
sca_in(get_msci(port) + FST, card),
sca_in(get_msci(port) + CST0, card),
sca_in(get_msci(port) + CST1, card));
-#ifdef __HD64572_H
- printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
- sca_inl(ISR0, card), sca_inl(ISR1, card));
-#else
printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
sca_in(ISR1, card), sca_in(ISR2, card));
-#endif
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, page); /* Restore original page */
#endif
}
#endif /* DEBUG_RINGS */
-
static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
pkt_desc __iomem *desc;
u32 buff, len;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
u8 page;
u32 maxlen;
-#endif
spin_lock_irq(&port->lock);
desc = desc_address(port, port->txin + 1, 1);
- if (readb(&desc->stat)) { /* allow 1 packet gap */
- /* should never happen - previous xmit should stop queue */
-#ifdef DEBUG_PKT
- printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
-#endif
- netif_stop_queue(dev);
- spin_unlock_irq(&port->lock);
- return 1; /* request packet to be queued */
- }
+ BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
@@ -771,7 +641,6 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
desc = desc_address(port, port->txin, 1);
buff = buffer_offset(port, port->txin, 1);
len = skb->len;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
page = buff / winsize(card);
buff = buff % winsize(card);
maxlen = winsize(card) - buff;
@@ -781,12 +650,10 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy_toio(winbase(card) + buff, skb->data, maxlen);
openwin(card, page + 1);
memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
- }
- else
-#endif
+ } else
memcpy_toio(winbase(card) + buff, skb->data, len);
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
#endif
writew(len, &desc->len);
@@ -794,7 +661,7 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
port->txin = next_desc(port, port->txin, 1);
- sca_outa(desc_offset(port, port->txin, 1),
+ sca_outw(desc_offset(port, port->txin, 1),
get_dmac_tx(port) + EDAL, card);
sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
@@ -810,40 +677,29 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
}
-
#ifdef NEED_DETECT_RAM
-static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
+ u32 ramsize)
{
/* Round RAM size to 32 bits, fill from end to start */
u32 i = ramsize &= ~3;
-
-#ifndef ALL_PAGES_ALWAYS_MAPPED
u32 size = winsize(card);
openwin(card, (i - 4) / size); /* select last window */
-#endif
+
do {
i -= 4;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
if ((i + 4) % size == 0)
openwin(card, i / size);
writel(i ^ 0x12345678, rambase + i % size);
-#else
- writel(i ^ 0x12345678, rambase + i);
-#endif
- }while (i > 0);
+ } while (i > 0);
for (i = 0; i < ramsize ; i += 4) {
-#ifndef ALL_PAGES_ALWAYS_MAPPED
if (i % size == 0)
openwin(card, i / size);
if (readl(rambase + i % size) != (i ^ 0x12345678))
break;
-#else
- if (readl(rambase + i) != (i ^ 0x12345678))
- break;
-#endif
}
return i;
@@ -851,7 +707,6 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsi
#endif /* NEED_DETECT_RAM */
-
static void __devinit sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
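To make the shared-RAM layout described in the header comment concrete, here is a worked sketch of the descriptor indexing used by desc_abs_number()/desc_offset() in these drivers, with made-up ring sizes (16 RX and 8 TX buffers per channel):

/* Sketch only: RX_BUFFS/TX_BUFFS are hypothetical, not values from the patch. */
enum { RX_BUFFS = 16, TX_BUFFS = 8 };

/* Rings are laid out per channel as [chan0 RX][chan0 TX][chan1 RX][chan1 TX];
 * the byte offset of a descriptor is this number times sizeof(pkt_desc). */
static unsigned int example_desc_abs_number(unsigned int chan,
                                            unsigned int desc, int transmit)
{
        desc %= (transmit ? TX_BUFFS : RX_BUFFS);   /* callers pass "X + 1" etc. */
        return chan * (RX_BUFFS + TX_BUFFS) + (transmit ? RX_BUFFS : 0) + desc;
}
/* Example: chan 1, TX descriptor 2 -> 24 + 16 + 2 = 42. */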
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
new file mode 100644
index 00000000000..08b3536944f
--- /dev/null
+++ b/drivers/net/wan/hd64572.c
@@ -0,0 +1,640 @@
+/*
+ * Hitachi (now Renesas) SCA-II HD64572 driver for Linux
+ *
+ * Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Source of information: HD64572 SCA-II User's Manual
+ *
+ * We use the following SCA memory map:
+ *
+ * Packet buffer descriptor rings - starting from card->rambase:
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
+ *
+ * Packet data buffers - starting from card->rambase + buff_offset:
+ * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
+ * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
+ * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers (if used)
+ * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used)
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/hdlc.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include "hd64572.h"
+
+#define NAPI_WEIGHT 16
+
+#define get_msci(port) (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET)
+#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+
+#define sca_in(reg, card) readb(card->scabase + (reg))
+#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
+#define sca_inw(reg, card) readw(card->scabase + (reg))
+#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
+#define sca_inl(reg, card) readl(card->scabase + (reg))
+#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
+
+static int sca_poll(struct napi_struct *napi, int budget);
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+ return dev_to_hdlc(dev)->priv;
+}
+
+static inline void enable_intr(port_t *port)
+{
+ /* enable DMIB and MSCI RXINTA interrupts */
+ sca_outl(sca_inl(IER0, port->card) |
+ (port->chan ? 0x08002200 : 0x00080022), IER0, port->card);
+}
+
+static inline void disable_intr(port_t *port)
+{
+ sca_outl(sca_inl(IER0, port->card) &
+ (port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card);
+}
+
+static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
+{
+ u16 rx_buffs = port->card->rx_ring_buffers;
+ u16 tx_buffs = port->card->tx_ring_buffers;
+
+ desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
+ return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
+}
+
+
+static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+{
+ /* Descriptor offset always fits in 16 bits */
+ return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
+}
+
+
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
+ int transmit)
+{
+ return (pkt_desc __iomem *)(port->card->rambase +
+ desc_offset(port, desc, transmit));
+}
+
+
+static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
+{
+ return port->card->buff_offset +
+ desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
+}
+
+
+static inline void sca_set_carrier(port_t *port)
+{
+ if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "%s: sca_set_carrier on\n",
+ port->netdev.name);
+#endif
+ netif_carrier_on(port->netdev);
+ } else {
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "%s: sca_set_carrier off\n",
+ port->netdev.name);
+#endif
+ netif_carrier_off(port->netdev);
+ }
+}
+
+
+static void sca_init_port(port_t *port)
+{
+ card_t *card = port->card;
+ u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
+ int transmit, i;
+
+ port->rxin = 0;
+ port->txin = 0;
+ port->txlast = 0;
+
+ for (transmit = 0; transmit < 2; transmit++) {
+ u16 buffs = transmit ? card->tx_ring_buffers
+ : card->rx_ring_buffers;
+
+ for (i = 0; i < buffs; i++) {
+ pkt_desc __iomem *desc = desc_address(port, i, transmit);
+ u16 chain_off = desc_offset(port, i + 1, transmit);
+ u32 buff_off = buffer_offset(port, i, transmit);
+
+ writel(chain_off, &desc->cp);
+ writel(buff_off, &desc->bp);
+ writew(0, &desc->len);
+ writeb(0, &desc->stat);
+ }
+ }
+
+ /* DMA disable - to halt state */
+ sca_out(0, DSR_RX(port->chan), card);
+ sca_out(0, DSR_TX(port->chan), card);
+
+ /* software ABORT - to initial state */
+ sca_out(DCR_ABORT, DCR_RX(port->chan), card);
+ sca_out(DCR_ABORT, DCR_TX(port->chan), card);
+
+ /* current desc addr */
+ sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
+ sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
+ dmac_rx + EDAL, card);
+ sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
+ sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);
+
+ /* clear frame end interrupt counter */
+ sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
+ sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);
+
+ /* Receive */
+ sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
+ sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
+ sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
+ sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */
+
+ /* Transmit */
+ sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
+ sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */
+
+ sca_set_carrier(port);
+ netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
+}
+
+
+/* MSCI interrupt service */
+static inline void sca_msci_intr(port_t *port)
+{
+ u16 msci = get_msci(port);
+ card_t* card = port->card;
+
+ if (sca_in(msci + ST1, card) & ST1_CDCD) {
+ /* Reset MSCI CDCD status bit */
+ sca_out(ST1_CDCD, msci + ST1, card);
+ sca_set_carrier(port);
+ }
+}
+
+
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
+ u16 rxin)
+{
+ struct net_device *dev = port->netdev;
+ struct sk_buff *skb;
+ u16 len;
+ u32 buff;
+
+ len = readw(&desc->len);
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ buff = buffer_offset(port, rxin, 0);
+ memcpy_fromio(skb->data, card->rambase + buff, len);
+
+ skb_put(skb, len);
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
+ debug_frame(skb);
+#endif
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ skb->protocol = hdlc_type_trans(skb, dev);
+ netif_receive_skb(skb);
+}
+
+
+/* Receive DMA service */
+static inline int sca_rx_done(port_t *port, int budget)
+{
+ struct net_device *dev = port->netdev;
+ u16 dmac = get_dmac_rx(port);
+ card_t *card = port->card;
+ u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
+ int received = 0;
+
+ /* Reset DSR status bits */
+ sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+ DSR_RX(port->chan), card);
+
+ if (stat & DSR_BOF)
+ /* Dropped one or more frames */
+ dev->stats.rx_over_errors++;
+
+ while (received < budget) {
+ u32 desc_off = desc_offset(port, port->rxin, 0);
+ pkt_desc __iomem *desc;
+ u32 cda = sca_inl(dmac + CDAL, card);
+
+ if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+ break; /* No frame received */
+
+ desc = desc_address(port, port->rxin, 0);
+ stat = readb(&desc->stat);
+ if (!(stat & ST_RX_EOM))
+ port->rxpart = 1; /* partial frame received */
+ else if ((stat & ST_ERROR_MASK) || port->rxpart) {
+ dev->stats.rx_errors++;
+ if (stat & ST_RX_OVERRUN)
+ dev->stats.rx_fifo_errors++;
+ else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
+ ST_RX_RESBIT)) || port->rxpart)
+ dev->stats.rx_frame_errors++;
+ else if (stat & ST_RX_CRC)
+ dev->stats.rx_crc_errors++;
+ if (stat & ST_RX_EOM)
+ port->rxpart = 0; /* received last fragment */
+ } else {
+ sca_rx(card, port, desc, port->rxin);
+ received++;
+ }
+
+ /* Set new error descriptor address */
+ sca_outl(desc_off, dmac + EDAL, card);
+ port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
+ }
+
+ /* make sure RX DMA is enabled */
+ sca_out(DSR_DE, DSR_RX(port->chan), card);
+ return received;
+}
+
+
+/* Transmit DMA service */
+static inline void sca_tx_done(port_t *port)
+{
+ struct net_device *dev = port->netdev;
+ card_t* card = port->card;
+ u8 stat;
+
+ spin_lock(&port->lock);
+
+ stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */
+
+ /* Reset DSR status bits */
+ sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+ DSR_TX(port->chan), card);
+
+ while (1) {
+ pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
+ u8 stat = readb(&desc->stat);
+
+ if (!(stat & ST_TX_OWNRSHP))
+ break; /* not yet transmitted */
+ if (stat & ST_TX_UNDRRUN) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_fifo_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += readw(&desc->len);
+ }
+ writeb(0, &desc->stat); /* Free descriptor */
+ port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
+ }
+
+ netif_wake_queue(dev);
+ spin_unlock(&port->lock);
+}
+
+
+static int sca_poll(struct napi_struct *napi, int budget)
+{
+ port_t *port = container_of(napi, port_t, napi);
+ u32 isr0 = sca_inl(ISR0, port->card);
+ int received = 0;
+
+ if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
+ sca_msci_intr(port);
+
+ if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
+ sca_tx_done(port);
+
+ if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
+ received = sca_rx_done(port, budget);
+
+ if (received < budget) {
+ netif_rx_complete(napi);
+ enable_intr(port);
+ }
+
+ return received;
+}
+
+static irqreturn_t sca_intr(int irq, void *dev_id)
+{
+ card_t *card = dev_id;
+ u32 isr0 = sca_inl(ISR0, card);
+ int i, handled = 0;
+
+ for (i = 0; i < 2; i++) {
+ port_t *port = get_port(card, i);
+ if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
+ handled = 1;
+ disable_intr(port);
+ netif_rx_schedule(&port->napi);
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+
+static void sca_set_port(port_t *port)
+{
+ card_t* card = port->card;
+ u16 msci = get_msci(port);
+ u8 md2 = sca_in(msci + MD2, card);
+ unsigned int tmc, br = 10, brv = 1024;
+
+
+ if (port->settings.clock_rate > 0) {
+ /* Try lower br for better accuracy*/
+ do {
+ br--;
+ brv >>= 1; /* brv = 2^9 = 512 max in specs */
+
+ /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
+ tmc = CLOCK_BASE / brv / port->settings.clock_rate;
+ }while (br > 1 && tmc <= 128);
+
+ if (tmc < 1) {
+ tmc = 1;
+ br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
+ brv = 1;
+ } else if (tmc > 255)
+ tmc = 256; /* tmc=0 means 256 - low baud rates */
+
+ port->settings.clock_rate = CLOCK_BASE / brv / tmc;
+ } else {
+ br = 9; /* Minimum clock rate */
+ tmc = 256; /* 8bit = 0 */
+ port->settings.clock_rate = CLOCK_BASE / (256 * 512);
+ }
+
+ port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
+ port->txs = (port->txs & ~CLK_BRG_MASK) | br;
+ port->tmc = tmc;
+
+ /* baud divisor - time constant*/
+ sca_out(port->tmc, msci + TMCR, card);
+ sca_out(port->tmc, msci + TMCT, card);
+
+ /* Set BRG bits */
+ sca_out(port->rxs, msci + RXS, card);
+ sca_out(port->txs, msci + TXS, card);
+
+ if (port->settings.loopback)
+ md2 |= MD2_LOOPBACK;
+ else
+ md2 &= ~MD2_LOOPBACK;
+
+ sca_out(md2, msci + MD2, card);
+
+}
+
+
+static void sca_open(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t* card = port->card;
+ u16 msci = get_msci(port);
+ u8 md0, md2;
+
+ switch(port->encoding) {
+ case ENCODING_NRZ: md2 = MD2_NRZ; break;
+ case ENCODING_NRZI: md2 = MD2_NRZI; break;
+ case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
+ case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
+ default: md2 = MD2_MANCHESTER;
+ }
+
+ if (port->settings.loopback)
+ md2 |= MD2_LOOPBACK;
+
+ switch(port->parity) {
+ case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
+ case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
+ case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
+ case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
+ default: md0 = MD0_HDLC | MD0_CRC_NONE;
+ }
+
+ sca_out(CMD_RESET, msci + CMD, card);
+ sca_out(md0, msci + MD0, card);
+ sca_out(0x00, msci + MD1, card); /* no address field check */
+ sca_out(md2, msci + MD2, card);
+ sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
+ /* Skip the rest of underrun frame */
+ sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
+ sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
+ sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
+ sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
+ sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
+ sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
+
+/* We're using the following interrupts:
+ - RXINTA (DCD changes only)
+ - DMIB (EOM - single frame transfer complete)
+*/
+ sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);
+
+ sca_out(port->tmc, msci + TMCR, card);
+ sca_out(port->tmc, msci + TMCT, card);
+ sca_out(port->rxs, msci + RXS, card);
+ sca_out(port->txs, msci + TXS, card);
+ sca_out(CMD_TX_ENABLE, msci + CMD, card);
+ sca_out(CMD_RX_ENABLE, msci + CMD, card);
+
+ sca_set_carrier(port);
+ enable_intr(port);
+ napi_enable(&port->napi);
+ netif_start_queue(dev);
+}
+
+
+static void sca_close(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+
+ /* reset channel */
+ sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
+ disable_intr(port);
+ napi_disable(&port->napi);
+ netif_stop_queue(dev);
+}
+
+
+static int sca_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI &&
+ encoding != ENCODING_FM_MARK &&
+ encoding != ENCODING_FM_SPACE &&
+ encoding != ENCODING_MANCHESTER)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC16_PR0 &&
+ parity != PARITY_CRC16_PR1 &&
+ parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT)
+ return -EINVAL;
+
+ dev_to_port(dev)->encoding = encoding;
+ dev_to_port(dev)->parity = parity;
+ return 0;
+}
+
+
+#ifdef DEBUG_RINGS
+static void sca_dump_rings(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t *card = port->card;
+ u16 cnt;
+
+ printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
+ sca_inl(get_dmac_rx(port) + CDAL, card),
+ sca_inl(get_dmac_rx(port) + EDAL, card),
+ sca_in(DSR_RX(port->chan), card), port->rxin,
+ sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
+ for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
+ printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+
+ printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+ "last=%u %sactive",
+ sca_inl(get_dmac_tx(port) + CDAL, card),
+ sca_inl(get_dmac_tx(port) + EDAL, card),
+ sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
+ sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
+
+ for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
+ printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+ printk("\n");
+
+ printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
+ " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
+ sca_in(get_msci(port) + MD0, card),
+ sca_in(get_msci(port) + MD1, card),
+ sca_in(get_msci(port) + MD2, card),
+ sca_in(get_msci(port) + ST0, card),
+ sca_in(get_msci(port) + ST1, card),
+ sca_in(get_msci(port) + ST2, card),
+ sca_in(get_msci(port) + ST3, card),
+ sca_in(get_msci(port) + ST4, card),
+ sca_in(get_msci(port) + FST, card),
+ sca_in(get_msci(port) + CST0, card),
+ sca_in(get_msci(port) + CST1, card));
+
+ printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
+ sca_inl(ISR0, card), sca_inl(ISR1, card));
+}
+#endif /* DEBUG_RINGS */
+
+
+static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t *card = port->card;
+ pkt_desc __iomem *desc;
+ u32 buff, len;
+
+ spin_lock_irq(&port->lock);
+
+ desc = desc_address(port, port->txin + 1, 1);
+ BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
+
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+ debug_frame(skb);
+#endif
+
+ desc = desc_address(port, port->txin, 1);
+ buff = buffer_offset(port, port->txin, 1);
+ len = skb->len;
+ memcpy_toio(card->rambase + buff, skb->data, len);
+
+ writew(len, &desc->len);
+ writeb(ST_TX_EOM, &desc->stat);
+ dev->trans_start = jiffies;
+
+ port->txin = (port->txin + 1) % card->tx_ring_buffers;
+ sca_outl(desc_offset(port, port->txin, 1),
+ get_dmac_tx(port) + EDAL, card);
+
+ sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */
+
+ desc = desc_address(port, port->txin + 1, 1);
+ if (readb(&desc->stat)) /* allow 1 packet gap */
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&port->lock);
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
+ u32 ramsize)
+{
+ /* Round RAM size to 32 bits, fill from end to start */
+ u32 i = ramsize &= ~3;
+
+ do {
+ i -= 4;
+ writel(i ^ 0x12345678, rambase + i);
+ } while (i > 0);
+
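+ /* Read back from the start; the first mismatch marks the end of RAM */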
+ for (i = 0; i < ramsize ; i += 4) {
+ if (readl(rambase + i) != (i ^ 0x12345678))
+ break;
+ }
+
+ return i;
+}
+
+
+static void __devinit sca_init(card_t *card, int wait_states)
+{
+ sca_out(wait_states, WCRL, card); /* Wait Control */
+ sca_out(wait_states, WCRM, card);
+ sca_out(wait_states, WCRH, card);
+
+ sca_out(0, DMER, card); /* DMA Master disable */
+ sca_out(0x03, PCR, card); /* DMA priority */
+ sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
+ sca_out(0, DSR_TX(0), card);
+ sca_out(0, DSR_RX(1), card);
+ sca_out(0, DSR_TX(1), card);
+ sca_out(DMER_DME, DMER, card); /* DMA Master enable */
+}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index d3d5055741a..f1ddd7c3459 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -342,7 +342,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
static int pvc_open(struct net_device *dev)
{
- pvc_device *pvc = dev->priv;
+ pvc_device *pvc = dev->ml_priv;
if ((pvc->frad->flags & IFF_UP) == 0)
return -EIO; /* Frad must be UP in order to activate PVC */
@@ -362,7 +362,7 @@ static int pvc_open(struct net_device *dev)
static int pvc_close(struct net_device *dev)
{
- pvc_device *pvc = dev->priv;
+ pvc_device *pvc = dev->ml_priv;
if (--pvc->open_count == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
@@ -381,7 +381,7 @@ static int pvc_close(struct net_device *dev)
static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- pvc_device *pvc = dev->priv;
+ pvc_device *pvc = dev->ml_priv;
fr_proto_pvc_info info;
if (ifr->ifr_settings.type == IF_GET_PROTO) {
@@ -409,7 +409,7 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
- pvc_device *pvc = dev->priv;
+ pvc_device *pvc = dev->ml_priv;
if (pvc->state.active) {
if (dev->type == ARPHRD_ETHER) {
@@ -1111,7 +1111,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->change_mtu = pvc_change_mtu;
dev->mtu = HDLC_MAX_MTU;
dev->tx_queue_len = 0;
- dev->priv = pvc;
+ dev->ml_priv = pvc;
result = dev_alloc_name(dev, dev->name);
if (result < 0) {
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 4efe9e6d32d..57fe714c1c7 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -2,7 +2,7 @@
* Generic HDLC support routines for Linux
* Point-to-point protocol support
*
- * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -18,87 +18,633 @@
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
-#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <net/syncppp.h>
+#include <linux/spinlock.h>
+
+#define DEBUG_CP 0 /* also bytes# to dump */
+#define DEBUG_STATE 0
+#define DEBUG_HARD_HEADER 0
+
+#define HDLC_ADDR_ALLSTATIONS 0xFF
+#define HDLC_CTRL_UI 0x03
+
+#define PID_LCP 0xC021
+#define PID_IP 0x0021
+#define PID_IPCP 0x8021
+#define PID_IPV6 0x0057
+#define PID_IPV6CP 0x8057
+
+enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
+enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
+ CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
+ LCP_DISC_REQ, CP_CODES};
+#if DEBUG_CP
+static const char *const code_names[CP_CODES] = {
+ "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
+ "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
+};
+static char debug_buffer[64 + 3 * DEBUG_CP];
+#endif
+
+enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};
+
+struct hdlc_header {
+ u8 address;
+ u8 control;
+ __be16 protocol;
+};
+
+struct cp_header {
+ u8 code;
+ u8 id;
+ __be16 len;
+};
+
-struct ppp_state {
- struct ppp_device pppdev;
- struct ppp_device *syncppp_ptr;
- int (*old_change_mtu)(struct net_device *dev, int new_mtu);
+struct proto {
+ struct net_device *dev;
+ struct timer_list timer;
+ unsigned long timeout;
+ u16 pid; /* protocol ID */
+ u8 state;
+ u8 cr_id; /* ID of last Configuration-Request */
+ u8 restart_counter;
};
+struct ppp {
+ struct proto protos[IDX_COUNT];
+ spinlock_t lock;
+ unsigned long last_pong;
+ unsigned int req_timeout, cr_retries, term_retries;
+ unsigned int keepalive_interval, keepalive_timeout;
+ u8 seq; /* local sequence number for requests */
+ u8 echo_id; /* ID of last Echo-Request (LCP) */
+};
+
+enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
+ STATES, STATE_MASK = 0xF};
+enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
+ RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
+enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
+ SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};
+
+#if DEBUG_STATE
+static const char *const state_names[STATES] = {
+ "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
+ "Opened"
+};
+static const char *const event_names[EVENTS] = {
+ "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
+ "RTR", "RTA", "RUC", "RXJ+", "RXJ-"
+};
+#endif
+
+static struct sk_buff_head tx_queue; /* used when holding the spin lock */
+
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
+static inline struct ppp* get_ppp(struct net_device *dev)
+{
+ return (struct ppp *)dev_to_hdlc(dev)->state;
+}
-static inline struct ppp_state* state(hdlc_device *hdlc)
+static inline struct proto* get_proto(struct net_device *dev, u16 pid)
{
- return(struct ppp_state *)(hdlc->state);
+ struct ppp *ppp = get_ppp(dev);
+
+ switch (pid) {
+ case PID_LCP:
+ return &ppp->protos[IDX_LCP];
+ case PID_IPCP:
+ return &ppp->protos[IDX_IPCP];
+ case PID_IPV6CP:
+ return &ppp->protos[IDX_IPV6CP];
+ default:
+ return NULL;
+ }
}
+static inline const char* proto_name(u16 pid)
+{
+ switch (pid) {
+ case PID_LCP:
+ return "LCP";
+ case PID_IPCP:
+ return "IPCP";
+ case PID_IPV6CP:
+ return "IPV6CP";
+ default:
+ return NULL;
+ }
+}
-static int ppp_open(struct net_device *dev)
+static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
- hdlc_device *hdlc = dev_to_hdlc(dev);
- int (*old_ioctl)(struct net_device *, struct ifreq *, int);
- int result;
+ struct hdlc_header *data = (struct hdlc_header*)skb->data;
+
+ if (skb->len < sizeof(struct hdlc_header))
+ return htons(ETH_P_HDLC);
+ if (data->address != HDLC_ADDR_ALLSTATIONS ||
+ data->control != HDLC_CTRL_UI)
+ return htons(ETH_P_HDLC);
+
+ switch (data->protocol) {
+ case __constant_htons(PID_IP):
+ skb_pull(skb, sizeof(struct hdlc_header));
+ return htons(ETH_P_IP);
- dev->ml_priv = &state(hdlc)->syncppp_ptr;
- state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev;
- state(hdlc)->pppdev.dev = dev;
+ case __constant_htons(PID_IPV6):
+ skb_pull(skb, sizeof(struct hdlc_header));
+ return htons(ETH_P_IPV6);
- old_ioctl = dev->do_ioctl;
- state(hdlc)->old_change_mtu = dev->change_mtu;
- sppp_attach(&state(hdlc)->pppdev);
- /* sppp_attach nukes them. We don't need syncppp's ioctl */
- dev->do_ioctl = old_ioctl;
- state(hdlc)->pppdev.sppp.pp_flags &= ~PP_CISCO;
- dev->type = ARPHRD_PPP;
- result = sppp_open(dev);
- if (result) {
- sppp_detach(dev);
- return result;
+ default:
+ return htons(ETH_P_HDLC);
}
+}
- return 0;
+
+static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
+ u16 type, const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct hdlc_header *data;
+#if DEBUG_HARD_HEADER
+ printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
+#endif
+
+ skb_push(skb, sizeof(struct hdlc_header));
+ data = (struct hdlc_header*)skb->data;
+
+ data->address = HDLC_ADDR_ALLSTATIONS;
+ data->control = HDLC_CTRL_UI;
+ switch (type) {
+ case ETH_P_IP:
+ data->protocol = htons(PID_IP);
+ break;
+ case ETH_P_IPV6:
+ data->protocol = htons(PID_IPV6);
+ break;
+ case PID_LCP:
+ case PID_IPCP:
+ case PID_IPV6CP:
+ data->protocol = htons(type);
+ break;
+ default: /* unknown protocol */
+ data->protocol = 0;
+ }
+ return sizeof(struct hdlc_header);
}
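+/* Control packets are queued on tx_queue while ppp->lock is held and
+ * transmitted by ppp_tx_flush() once the lock has been released. */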
+static void ppp_tx_flush(void)
+{
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&tx_queue)) != NULL)
+ dev_queue_xmit(skb);
+}
-static void ppp_close(struct net_device *dev)
+static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
+ u8 id, unsigned int len, const void *data)
{
- hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct sk_buff *skb;
+ struct cp_header *cp;
+ unsigned int magic_len = 0;
+ static u32 magic;
+
+#if DEBUG_CP
+ int i;
+ char *ptr;
+#endif
+
+ if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
+ magic_len = sizeof(magic);
+
+ skb = dev_alloc_skb(sizeof(struct hdlc_header) +
+ sizeof(struct cp_header) + magic_len + len);
+ if (!skb) {
+ printk(KERN_WARNING "%s: out of memory in ppp_tx_cp()\n",
+ dev->name);
+ return;
+ }
+ skb_reserve(skb, sizeof(struct hdlc_header));
+
+ cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
+ cp->code = code;
+ cp->id = id;
+ cp->len = htons(sizeof(struct cp_header) + magic_len + len);
+
+ if (magic_len)
+ memcpy(skb_put(skb, magic_len), &magic, magic_len);
+ if (len)
+ memcpy(skb_put(skb, len), data, len);
+
+#if DEBUG_CP
+ BUG_ON(code >= CP_CODES);
+ ptr = debug_buffer;
+ *ptr = '\x0';
+ for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
+ sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
+ ptr += strlen(ptr);
+ }
+ printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
+ proto_name(pid), code_names[code], id, debug_buffer);
+#endif
- sppp_close(dev);
- sppp_detach(dev);
+ ppp_hard_header(skb, dev, pid, NULL, NULL, 0);
- dev->change_mtu = state(hdlc)->old_change_mtu;
- dev->mtu = HDLC_MAX_MTU;
- dev->hard_header_len = 16;
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb_reset_network_header(skb);
+ skb_queue_tail(&tx_queue, skb);
}
+/* State transition table (compare STD-51)
+ Events Actions
+ TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count
+ TO- = Timeout with counter expired zrc = Zero-Restart-Count
+
+ RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request
+ RCR- = Receive-Configure-Request (Bad)
+ RCA = Receive-Configure-Ack sca = Send-Configure-Ack
+ RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej
+
+ RTR = Receive-Terminate-Request str = Send-Terminate-Request
+ RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack
+
+ RUC = Receive-Unknown-Code scj = Send-Code-Reject
+ RXJ+ = Receive-Code-Reject (permitted)
+ or Receive-Protocol-Reject
+ RXJ- = Receive-Code-Reject (catastrophic)
+ or Receive-Protocol-Reject
+*/
+static int cp_table[EVENTS][STATES] = {
+ /* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED
+ 0 1 2 3 4 5 6 */
+ {IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */
+ { INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */
+ { INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */
+ { INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */
+ { STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */
+ { STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* RCR- */
+ { STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */
+ { STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */
+ { STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */
+ { 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */
+ { SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */
+ { 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */
+ { 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */
+};
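+/* Example: cp_table[RCR_GOOD][REQ_SENT] is SCA|5 - a good Configure-
+ * Request received in ReqSent means "send Configure-Ack and move to
+ * AckSent (state 5)". */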
+
-static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
+/* SCA: RCR+ must supply id, len and data
+ SCN: RCR- must supply code, id, len and data
+ STA: RTR must supply id
+ SCJ: RUC must supply CP packet len and data */
+static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
+ u8 id, unsigned int len, const void *data)
{
- return __constant_htons(ETH_P_WAN_PPP);
+ int old_state, action;
+ struct ppp *ppp = get_ppp(dev);
+ struct proto *proto = get_proto(dev, pid);
+
+ old_state = proto->state;
+ BUG_ON(old_state >= STATES);
+ BUG_ON(event >= EVENTS);
+
+#if DEBUG_STATE
+ printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
+ proto_name(pid), event_names[event], state_names[proto->state]);
+#endif
+
+ action = cp_table[event][old_state];
+
+ proto->state = action & STATE_MASK;
+ if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
+ mod_timer(&proto->timer, proto->timeout =
+ jiffies + ppp->req_timeout * HZ);
+ if (action & ZRC)
+ proto->restart_counter = 0;
+ if (action & IRC)
+ proto->restart_counter = (proto->state == STOPPING) ?
+ ppp->term_retries : ppp->cr_retries;
+
+ if (action & SCR) /* send Configure-Request */
+ ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
+ 0, NULL);
+ if (action & SCA) /* send Configure-Ack */
+ ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
+ if (action & SCN) /* send Configure-Nak/Reject */
+ ppp_tx_cp(dev, pid, code, id, len, data);
+ if (action & STR) /* send Terminate-Request */
+ ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
+ if (action & STA) /* send Terminate-Ack */
+ ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
+ if (action & SCJ) /* send Code-Reject */
+ ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);
+
+ if (old_state != OPENED && proto->state == OPENED) {
+ printk(KERN_INFO "%s: %s up\n", dev->name, proto_name(pid));
+ if (pid == PID_LCP) {
+ netif_dormant_off(dev);
+ ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
+ ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
+ ppp->last_pong = jiffies;
+ mod_timer(&proto->timer, proto->timeout =
+ jiffies + ppp->keepalive_interval * HZ);
+ }
+ }
+ if (old_state == OPENED && proto->state != OPENED) {
+ printk(KERN_INFO "%s: %s down\n", dev->name, proto_name(pid));
+ if (pid == PID_LCP) {
+ netif_dormant_on(dev);
+ ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
+ ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
+ }
+ }
+ if (old_state != CLOSED && proto->state == CLOSED)
+ del_timer(&proto->timer);
+
+#if DEBUG_STATE
+ printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
+ proto_name(pid), event_names[event], state_names[proto->state]);
+#endif
+}
+
+
+static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+ unsigned int req_len, const u8 *data)
+{
+ static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
+ const u8 *opt;
+ u8 *out;
+ unsigned int len = req_len, nak_len = 0, rej_len = 0;
+
+ if (!(out = kmalloc(len, GFP_ATOMIC))) {
+ dev->stats.rx_dropped++;
+ return; /* out of memory, ignore CR packet */
+ }
+
+ for (opt = data; len; len -= opt[1], opt += opt[1]) {
+ if (len < 2 || len < opt[1]) {
+ dev->stats.rx_errors++;
+ kfree(out);
+ return; /* bad packet, drop silently */
+ }
+
+ if (pid == PID_LCP)
+ switch (opt[0]) {
+ case LCP_OPTION_MRU:
+ continue; /* MRU always OK and > 1500 bytes? */
+
+ case LCP_OPTION_ACCM: /* async control character map */
+ if (!memcmp(opt, valid_accm,
+ sizeof(valid_accm)))
+ continue;
+ if (!rej_len) { /* NAK it */
+ memcpy(out + nak_len, valid_accm,
+ sizeof(valid_accm));
+ nak_len += sizeof(valid_accm);
+ continue;
+ }
+ break;
+ case LCP_OPTION_MAGIC:
+ if (opt[1] != 6 || (!opt[2] && !opt[3] &&
+ !opt[4] && !opt[5]))
+ break; /* reject invalid magic number */
+ continue;
+ }
+ /* reject this option */
+ memcpy(out + rej_len, opt, opt[1]);
+ rej_len += opt[1];
+ }
+
+ if (rej_len)
+ ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
+ else if (nak_len)
+ ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
+ else
+ ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
+
+ kfree(out);
+}
+
+static int ppp_rx(struct sk_buff *skb)
+{
+ struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
+ struct net_device *dev = skb->dev;
+ struct ppp *ppp = get_ppp(dev);
+ struct proto *proto;
+ struct cp_header *cp;
+ unsigned long flags;
+ unsigned int len;
+ u16 pid;
+#if DEBUG_CP
+ int i;
+ char *ptr;
+#endif
+
+ spin_lock_irqsave(&ppp->lock, flags);
+ /* Check HDLC header */
+ if (skb->len < sizeof(struct hdlc_header))
+ goto rx_error;
+ cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
+ if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
+ hdr->control != HDLC_CTRL_UI)
+ goto rx_error;
+
+ pid = ntohs(hdr->protocol);
+ proto = get_proto(dev, pid);
+ if (!proto) {
+ if (ppp->protos[IDX_LCP].state == OPENED)
+ ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
+ ++ppp->seq, skb->len + 2, &hdr->protocol);
+ goto rx_error;
+ }
+
+ len = ntohs(cp->len);
+ if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
+ skb->len < len /* truncated packet? */)
+ goto rx_error;
+ skb_pull(skb, sizeof(struct cp_header));
+ len -= sizeof(struct cp_header);
+
+ /* HDLC and CP headers stripped from skb */
+#if DEBUG_CP
+ if (cp->code < CP_CODES)
+ sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
+ cp->id);
+ else
+ sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
+ ptr = debug_buffer + strlen(debug_buffer);
+ for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
+ sprintf(ptr, " %02X", skb->data[i]);
+ ptr += strlen(ptr);
+ }
+ printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
+ debug_buffer);
+#endif
+
+ /* LCP only */
+ if (pid == PID_LCP)
+ switch (cp->code) {
+ case LCP_PROTO_REJ:
+ pid = ntohs(*(__be16*)skb->data);
+ if (pid == PID_LCP || pid == PID_IPCP ||
+ pid == PID_IPV6CP)
+ ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
+ 0, NULL);
+ goto out;
+
+ case LCP_ECHO_REQ: /* send Echo-Reply */
+ if (len >= 4 && proto->state == OPENED)
+ ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
+ cp->id, len - 4, skb->data + 4);
+ goto out;
+
+ case LCP_ECHO_REPLY:
+ if (cp->id == ppp->echo_id)
+ ppp->last_pong = jiffies;
+ goto out;
+
+ case LCP_DISC_REQ: /* discard */
+ goto out;
+ }
+
+ /* LCP, IPCP and IPV6CP */
+ switch (cp->code) {
+ case CP_CONF_REQ:
+ ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
+ goto out;
+
+ case CP_CONF_ACK:
+ if (cp->id == proto->cr_id)
+ ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
+ goto out;
+
+ case CP_CONF_REJ:
+ case CP_CONF_NAK:
+ if (cp->id == proto->cr_id)
+ ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
+ goto out;
+
+ case CP_TERM_REQ:
+ ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
+ goto out;
+
+ case CP_TERM_ACK:
+ ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
+ goto out;
+
+ case CP_CODE_REJ:
+ ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
+ goto out;
+
+ default:
+ len += sizeof(struct cp_header);
+ if (len > dev->mtu)
+ len = dev->mtu;
+ ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
+ goto out;
+ }
+ goto out;
+
+rx_error:
+ dev->stats.rx_errors++;
+out:
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ dev_kfree_skb_any(skb);
+ ppp_tx_flush();
+ return NET_RX_DROP;
}
+static void ppp_timer(unsigned long arg)
+{
+ struct proto *proto = (struct proto *)arg;
+ struct ppp *ppp = get_ppp(proto->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppp->lock, flags);
+ switch (proto->state) {
+ case STOPPING:
+ case REQ_SENT:
+ case ACK_RECV:
+ case ACK_SENT:
+ if (proto->restart_counter) {
+ ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 0, NULL);
+ proto->restart_counter--;
+ } else
+ ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
+ 0, NULL);
+ break;
+
+ case OPENED:
+ if (proto->pid != PID_LCP)
+ break;
+ if (time_after(jiffies, ppp->last_pong +
+ ppp->keepalive_timeout * HZ)) {
+ printk(KERN_INFO "%s: Link down\n", proto->dev->name);
+ ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
+ ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
+ } else { /* send keep-alive packet */
+ ppp->echo_id = ++ppp->seq;
+ ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
+ ppp->echo_id, 0, NULL);
+ proto->timer.expires = jiffies +
+ ppp->keepalive_interval * HZ;
+ add_timer(&proto->timer);
+ }
+ break;
+ }
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ ppp_tx_flush();
+}
+
+
+static void ppp_start(struct net_device *dev)
+{
+ struct ppp *ppp = get_ppp(dev);
+ int i;
+
+ for (i = 0; i < IDX_COUNT; i++) {
+ struct proto *proto = &ppp->protos[i];
+ proto->dev = dev;
+ init_timer(&proto->timer);
+ proto->timer.function = ppp_timer;
+ proto->timer.data = (unsigned long)proto;
+ proto->state = CLOSED;
+ }
+ ppp->protos[IDX_LCP].pid = PID_LCP;
+ ppp->protos[IDX_IPCP].pid = PID_IPCP;
+ ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;
+
+ ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
+}
+
+static void ppp_stop(struct net_device *dev)
+{
+ ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
+}
static struct hdlc_proto proto = {
- .open = ppp_open,
- .close = ppp_close,
+ .start = ppp_start,
+ .stop = ppp_stop,
.type_trans = ppp_type_trans,
.ioctl = ppp_ioctl,
+ .netif_rx = ppp_rx,
.module = THIS_MODULE,
};
+static const struct header_ops ppp_header_ops = {
+ .create = ppp_hard_header,
+};
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ppp *ppp;
int result;
switch (ifr->ifr_settings.type) {
@@ -109,25 +655,35 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_PPP:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(dev->flags & IFF_UP)
+ if (dev->flags & IFF_UP)
return -EBUSY;
/* no settable parameters */
- result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto,
- sizeof(struct ppp_state));
+ result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
if (result)
return result;
+
+ ppp = get_ppp(dev);
+ spin_lock_init(&ppp->lock);
+ ppp->req_timeout = 2;
+ ppp->cr_retries = 10;
+ ppp->term_retries = 2;
+ ppp->keepalive_interval = 10;
+ ppp->keepalive_timeout = 60;
+
dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header_len = sizeof(struct hdlc_header);
+ dev->header_ops = &ppp_header_ops;
dev->type = ARPHRD_PPP;
- netif_dormant_off(dev);
+ netif_dormant_on(dev);
return 0;
}
@@ -137,12 +693,11 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
static int __init mod_init(void)
{
+ skb_queue_head_init(&tx_queue);
register_hdlc_protocol(&proto);
return 0;
}
-
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index e299313f828..af54f0cf1b3 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -66,7 +66,6 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
* it right now.
*/
netif_rx(skb);
- c->netdevice->last_rx = jiffies;
}
/*
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
new file mode 100644
index 00000000000..0c6802507a7
--- /dev/null
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -0,0 +1,1325 @@
+/*
+ * Intel IXP4xx HSS (synchronous serial port) driver for Linux
+ *
+ * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/hdlc.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
+
+#define DEBUG_DESC 0
+#define DEBUG_RX 0
+#define DEBUG_TX 0
+#define DEBUG_PKT_BYTES 0
+#define DEBUG_CLOSE 0
+
+#define DRV_NAME "ixp4xx_hss"
+
+#define PKT_EXTRA_FLAGS 0 /* orig 1 */
+#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */
+#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */
+
+#define RX_DESCS 16 /* also length of all RX queues */
+#define TX_DESCS 16 /* also length of all TX queues */
+
+#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
+#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */
+#define MAX_CLOSE_WAIT 1000 /* microseconds */
+#define HSS_COUNT 2
+#define FRAME_SIZE 256 /* doesn't matter at this point */
+#define FRAME_OFFSET 0
+#define MAX_CHANNELS (FRAME_SIZE / 8)
+
+#define NAPI_WEIGHT 16
+
+/* Queue IDs */
+#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
+#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
+#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
+#define HSS0_PKT_TX1_QUEUE 15
+#define HSS0_PKT_TX2_QUEUE 16
+#define HSS0_PKT_TX3_QUEUE 17
+#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */
+#define HSS0_PKT_RXFREE1_QUEUE 19
+#define HSS0_PKT_RXFREE2_QUEUE 20
+#define HSS0_PKT_RXFREE3_QUEUE 21
+#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
+
+#define HSS1_CHL_RXTRIG_QUEUE 10
+#define HSS1_PKT_RX_QUEUE 0
+#define HSS1_PKT_TX0_QUEUE 5
+#define HSS1_PKT_TX1_QUEUE 6
+#define HSS1_PKT_TX2_QUEUE 7
+#define HSS1_PKT_TX3_QUEUE 8
+#define HSS1_PKT_RXFREE0_QUEUE 1
+#define HSS1_PKT_RXFREE1_QUEUE 2
+#define HSS1_PKT_RXFREE2_QUEUE 3
+#define HSS1_PKT_RXFREE3_QUEUE 4
+#define HSS1_PKT_TXDONE_QUEUE 9
+
+#define NPE_PKT_MODE_HDLC 0
+#define NPE_PKT_MODE_RAW 1
+#define NPE_PKT_MODE_56KMODE 2
+#define NPE_PKT_MODE_56KENDIAN_MSB 4
+
+/* PKT_PIPE_HDLC_CFG_WRITE flags */
+#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */
+#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
+#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
+
+
+/* hss_config, PCRs */
+/* Frame sync sampling, default = active low */
+#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
+#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000
+#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000
+
+/* Frame sync pin: input (default) or output generated off a given clk edge */
+#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000
+#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000
+
+/* Frame and data clock sampling on edge, default = falling */
+#define PCR_FCLK_EDGE_RISING 0x08000000
+#define PCR_DCLK_EDGE_RISING 0x04000000
+
+/* Clock direction, default = input */
+#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000
+
+/* Generate/Receive frame pulses, default = enabled */
+#define PCR_FRM_PULSE_DISABLED 0x01000000
+
+ /* Data rate is full (default) or half the configured clk speed */
+#define PCR_HALF_CLK_RATE 0x00200000
+
+/* Invert data between NPE and HSS FIFOs? (default = no) */
+#define PCR_DATA_POLARITY_INVERT 0x00100000
+
+/* TX/RX endianness, default = LSB */
+#define PCR_MSB_ENDIAN 0x00080000
+
+/* Normal (default) / open drain mode (TX only) */
+#define PCR_TX_PINS_OPEN_DRAIN 0x00040000
+
+/* No framing bit transmitted and expected on RX? (default = framing bit) */
+#define PCR_SOF_NO_FBIT 0x00020000
+
+/* Drive data pins? */
+#define PCR_TX_DATA_ENABLE 0x00010000
+
+/* Voice 56k type: drive the data pins low (default), high, high Z */
+#define PCR_TX_V56K_HIGH 0x00002000
+#define PCR_TX_V56K_HIGH_IMP 0x00004000
+
+/* Unassigned type: drive the data pins low (default), high, high Z */
+#define PCR_TX_UNASS_HIGH 0x00000800
+#define PCR_TX_UNASS_HIGH_IMP 0x00001000
+
+/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
+#define PCR_TX_FB_HIGH_IMP 0x00000400
+
+/* 56k data endianness - which bit unused: high (default) or low */
+#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200
+
+/* 56k data transmission type: 32/8 bit data (default) or 56K data */
+#define PCR_TX_56KS_56K_DATA 0x00000100
+
+/* hss_config, cCR */
+/* Number of packetized clients, default = 1 */
+#define CCR_NPE_HFIFO_2_HDLC 0x04000000
+#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000
+
+/* default = no loopback */
+#define CCR_LOOPBACK 0x02000000
+
+/* HSS number, default = 0 (first) */
+#define CCR_SECOND_HSS 0x01000000
+
+
+/* hss_config, clkCR: main:10, num:10, denom:12 */
+#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/
+
+#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
+#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
+#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
+#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
+#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
+#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
+
+#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
+#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
+#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
+#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
+#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
+#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
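+/* Each value above packs three fields at bits 31:22, 21:12 and 11:0,
+ * matching the main:10/num:10/denom:12 widths noted above. */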
+
+
+/* hss_config, LUT entries */
+#define TDMMAP_UNASSIGNED 0
+#define TDMMAP_HDLC 1 /* HDLC - packetized */
+#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */
+#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */
+
+/* offsets into HSS config */
+#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
+#define HSS_CONFIG_RX_PCR 0x04
+#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */
+#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */
+#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */
+#define HSS_CONFIG_RX_FCR 0x14
+#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
+#define HSS_CONFIG_RX_LUT 0x38
+
+
+/* NPE command codes */
+/* writes the ConfigWord value to the location specified by offset */
+#define PORT_CONFIG_WRITE 0x40
+
+/* triggers the NPE to load the contents of the configuration table */
+#define PORT_CONFIG_LOAD 0x41
+
+/* triggers the NPE to return an HssErrorReadResponse message */
+#define PORT_ERROR_READ 0x42
+
+/* triggers the NPE to reset internal status and enable the HssPacketized
+ operation for the flow specified by pPipe */
+#define PKT_PIPE_FLOW_ENABLE 0x50
+#define PKT_PIPE_FLOW_DISABLE 0x51
+#define PKT_NUM_PIPES_WRITE 0x52
+#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53
+#define PKT_PIPE_HDLC_CFG_WRITE 0x54
+#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55
+#define PKT_PIPE_RX_SIZE_WRITE 0x56
+#define PKT_PIPE_MODE_WRITE 0x57
+
+/* HDLC packet status values - desc->status */
+#define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */
+#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
+#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
+#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
+ this packet (if buf_len < pkt_len) */
+#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
+#define ERR_HDLC_ABORT 6 /* abort sequence received */
+#define ERR_DISCONNECTING 7 /* disconnect is in progress */
+
+
+#ifdef __ARMEB__
+typedef struct sk_buff buffer_t;
+#define free_buffer dev_kfree_skb
+#define free_buffer_irq dev_kfree_skb_irq
+#else
+typedef void buffer_t;
+#define free_buffer kfree
+#define free_buffer_irq kfree
+#endif
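+/* On big-endian ARM the buffers handed to the NPE are sk_buffs used
+ * directly; little-endian hosts copy frames through plain kmalloc()
+ * bounce buffers, byte-swapping 32-bit words (memcpy_swab32()). */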
+
+struct port {
+ struct device *dev;
+ struct npe *npe;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct hss_plat_info *plat;
+ buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
+ struct desc *desc_tab; /* coherent */
+ u32 desc_tab_phys;
+ unsigned int id;
+ unsigned int clock_type, clock_rate, loopback;
+ unsigned int initialized, carrier;
+ u8 hdlc_cfg;
+};
+
+/* NPE message structure */
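+/* Byte fields are declared in opposite order for big- and little-endian
+ * hosts so the message maps onto the same two 32-bit words either way. */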
+struct msg {
+#ifdef __ARMEB__
+ u8 cmd, unused, hss_port, index;
+ union {
+ struct { u8 data8a, data8b, data8c, data8d; };
+ struct { u16 data16a, data16b; };
+ struct { u32 data32; };
+ };
+#else
+ u8 index, hss_port, unused, cmd;
+ union {
+ struct { u8 data8d, data8c, data8b, data8a; };
+ struct { u16 data16b, data16a; };
+ struct { u32 data32; };
+ };
+#endif
+};
+
+/* HDLC packet descriptor */
+struct desc {
+ u32 next; /* pointer to next buffer, unused */
+
+#ifdef __ARMEB__
+ u16 buf_len; /* buffer length */
+ u16 pkt_len; /* packet length */
+ u32 data; /* pointer to data buffer in RAM */
+ u8 status;
+ u8 error_count;
+ u16 __reserved;
+#else
+ u16 pkt_len; /* packet length */
+ u16 buf_len; /* buffer length */
+ u32 data; /* pointer to data buffer in RAM */
+ u16 __reserved;
+ u8 error_count;
+ u8 status;
+#endif
+ u32 __reserved1[4];
+};
+
+
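+/* desc_tab holds RX_DESCS receive descriptors followed by TX_DESCS
+ * transmit descriptors in one coherent allocation (POOL_ALLOC_SIZE). */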
+#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
+ (n) * sizeof(struct desc))
+#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
+
+#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
+ ((n) + RX_DESCS) * sizeof(struct desc))
+#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
+
+/*****************************************************************************
+ * global variables
+ ****************************************************************************/
+
+static int ports_open;
+static struct dma_pool *dma_pool;
+static spinlock_t npe_lock;
+
+static const struct {
+ int tx, txdone, rx, rxfree;
+} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
+ HSS0_PKT_RXFREE0_QUEUE},
+ {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
+ HSS1_PKT_RXFREE0_QUEUE},
+};
+
+/*****************************************************************************
+ * utility functions
+ ****************************************************************************/
+
+static inline struct port* dev_to_port(struct net_device *dev)
+{
+ return dev_to_hdlc(dev)->priv;
+}
+
+#ifndef __ARMEB__
+static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
+{
+ int i;
+ for (i = 0; i < cnt; i++)
+ dest[i] = swab32(src[i]);
+}
+#endif
+
+/*****************************************************************************
+ * HSS access
+ ****************************************************************************/
+
+static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
+{
+ u32 *val = (u32*)msg;
+ if (npe_send_message(port->npe, msg, what)) {
+ printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
+ " to %s\n", port->id, val[0], val[1],
+ npe_name(port->npe));
+ BUG();
+ }
+}
+
+static void hss_config_set_lut(struct port *port)
+{
+ struct msg msg;
+ int ch;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_WRITE;
+ msg.hss_port = port->id;
+
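+ /* Each timeslot takes a 2-bit TDMMAP_* code; 16 slots fill one 32-bit
+ * LUT word, which is sent to the NPE after every 16th channel. */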
+ for (ch = 0; ch < MAX_CHANNELS; ch++) {
+ msg.data32 >>= 2;
+ msg.data32 |= TDMMAP_HDLC << 30;
+
+ if (ch % 16 == 15) {
+ msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
+ hss_npe_send(port, &msg, "HSS_SET_TX_LUT");
+
+ msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
+ hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
+ }
+ }
+}
+
+static void hss_config(struct port *port)
+{
+ struct msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_WRITE;
+ msg.hss_port = port->id;
+ msg.index = HSS_CONFIG_TX_PCR;
+ msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
+ PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
+ if (port->clock_type == CLOCK_INT)
+ msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
+ hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
+
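+ /* Derive the RX PCR from the TX value: clear TX_DATA_ENABLE and
+ * sample the data clock on the rising edge instead. */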
+ msg.index = HSS_CONFIG_RX_PCR;
+ msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
+ hss_npe_send(port, &msg, "HSS_SET_RX_PCR");
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_WRITE;
+ msg.hss_port = port->id;
+ msg.index = HSS_CONFIG_CORE_CR;
+ msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
+ (port->id ? CCR_SECOND_HSS : 0);
+ hss_npe_send(port, &msg, "HSS_SET_CORE_CR");
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_WRITE;
+ msg.hss_port = port->id;
+ msg.index = HSS_CONFIG_CLOCK_CR;
+ msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
+ hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_WRITE;
+ msg.hss_port = port->id;
+ msg.index = HSS_CONFIG_TX_FCR;
+ msg.data16a = FRAME_OFFSET;
+ msg.data16b = FRAME_SIZE - 1;
+ hss_npe_send(port, &msg, "HSS_SET_TX_FCR");
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_WRITE;
+ msg.hss_port = port->id;
+ msg.index = HSS_CONFIG_RX_FCR;
+ msg.data16a = FRAME_OFFSET;
+ msg.data16b = FRAME_SIZE - 1;
+ hss_npe_send(port, &msg, "HSS_SET_RX_FCR");
+
+ hss_config_set_lut(port);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_CONFIG_LOAD;
+ msg.hss_port = port->id;
+ hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");
+
+ if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
+ /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
+ msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
+ printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
+ port->id);
+ BUG();
+ }
+
+ /* HDLC may stop working without this - check FIXME */
+ npe_recv_message(port->npe, &msg, "FLUSH_IT");
+}
+
+static void hss_set_hdlc_cfg(struct port *port)
+{
+ struct msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
+ msg.hss_port = port->id;
+ msg.data8a = port->hdlc_cfg; /* rx_cfg */
+ msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
+ hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
+}
+
+static u32 hss_get_status(struct port *port)
+{
+ struct msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PORT_ERROR_READ;
+ msg.hss_port = port->id;
+ hss_npe_send(port, &msg, "PORT_ERROR_READ");
+ if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
+ printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
+ port->id);
+ BUG();
+ }
+
+ return msg.data32;
+}
+
+static void hss_start_hdlc(struct port *port)
+{
+ struct msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PKT_PIPE_FLOW_ENABLE;
+ msg.hss_port = port->id;
+ msg.data32 = 0;
+ hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
+}
+
+static void hss_stop_hdlc(struct port *port)
+{
+ struct msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PKT_PIPE_FLOW_DISABLE;
+ msg.hss_port = port->id;
+ hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
+ hss_get_status(port); /* make sure it's halted */
+}
+
+static int hss_load_firmware(struct port *port)
+{
+ struct msg msg;
+ int err;
+
+ if (port->initialized)
+ return 0;
+
+ if (!npe_running(port->npe) &&
+ (err = npe_load_firmware(port->npe, npe_name(port->npe),
+ port->dev)))
+ return err;
+
+ /* HDLC mode configuration */
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = PKT_NUM_PIPES_WRITE;
+ msg.hss_port = port->id;
+ msg.data8a = PKT_NUM_PIPES;
+ hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");
+
+ msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
+ msg.data8a = PKT_PIPE_FIFO_SIZEW;
+ hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");
+
+ msg.cmd = PKT_PIPE_MODE_WRITE;
+ msg.data8a = NPE_PKT_MODE_HDLC;
+ /* msg.data8b = inv_mask */
+ /* msg.data8c = or_mask */
+ hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");
+
+ msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
+ msg.data16a = HDLC_MAX_MRU; /* including CRC */
+ hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");
+
+ msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
+ msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
+ hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
+
+ port->initialized = 1;
+ return 0;
+}
+
+/*****************************************************************************
+ * packetized (HDLC) operation
+ ****************************************************************************/
+
+static inline void debug_pkt(struct net_device *dev, const char *func,
+ u8 *data, int len)
+{
+#if DEBUG_PKT_BYTES
+ int i;
+
+ printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
+ for (i = 0; i < len; i++) {
+ if (i >= DEBUG_PKT_BYTES)
+ break;
+ printk("%s%02X", !(i % 4) ? " " : "", data[i]);
+ }
+ printk("\n");
+#endif
+}
+
+
+static inline void debug_desc(u32 phys, struct desc *desc)
+{
+#if DEBUG_DESC
+ printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
+ phys, desc->next, desc->buf_len, desc->pkt_len,
+ desc->data, desc->status, desc->error_count);
+#endif
+}
+
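+/* Pop a physical descriptor address off a hardware queue and convert it
+ * to an index into desc_tab; returns -1 if the queue is empty. */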
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+ int is_tx)
+{
+ u32 phys, tab_phys, n_desc;
+ struct desc *tab;
+
+ if (!(phys = qmgr_get_entry(queue)))
+ return -1;
+
+ BUG_ON(phys & 0x1F);
+ tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
+ tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
+ n_desc = (phys - tab_phys) / sizeof(struct desc);
+ BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
+ debug_desc(phys, &tab[n_desc]);
+ BUG_ON(tab[n_desc].next);
+ return n_desc;
+}
+
+static inline void queue_put_desc(unsigned int queue, u32 phys,
+ struct desc *desc)
+{
+ debug_desc(phys, desc);
+ BUG_ON(phys & 0x1F);
+ qmgr_put_entry(queue, phys);
+ BUG_ON(qmgr_stat_overflow(queue));
+}
+
+
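+/* On little-endian hosts the TX mapping covers the whole word-aligned
+ * bounce buffer (see hss_hdlc_xmit()), so recover its base and padded
+ * length before unmapping. */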
+static inline void dma_unmap_tx(struct port *port, struct desc *desc)
+{
+#ifdef __ARMEB__
+ dma_unmap_single(&port->netdev->dev, desc->data,
+ desc->buf_len, DMA_TO_DEVICE);
+#else
+ dma_unmap_single(&port->netdev->dev, desc->data & ~3,
+ ALIGN((desc->data & 3) + desc->buf_len, 4),
+ DMA_TO_DEVICE);
+#endif
+}
+
+
+static void hss_hdlc_set_carrier(void *pdev, int carrier)
+{
+ struct net_device *netdev = pdev;
+ struct port *port = dev_to_port(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&npe_lock, flags);
+ port->carrier = carrier;
+ if (!port->loopback) {
+ if (carrier)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
+ }
+ spin_unlock_irqrestore(&npe_lock, flags);
+}
+
+static void hss_hdlc_rx_irq(void *pdev)
+{
+ struct net_device *dev = pdev;
+ struct port *port = dev_to_port(dev);
+
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
+#endif
+ qmgr_disable_irq(queue_ids[port->id].rx);
+ netif_rx_schedule(dev, &port->napi);
+}
+
+static int hss_hdlc_poll(struct napi_struct *napi, int budget)
+{
+ struct port *port = container_of(napi, struct port, napi);
+ struct net_device *dev = port->netdev;
+ unsigned int rxq = queue_ids[port->id].rx;
+ unsigned int rxfreeq = queue_ids[port->id].rxfree;
+ int received = 0;
+
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
+#endif
+
+ while (received < budget) {
+ struct sk_buff *skb;
+ struct desc *desc;
+ int n;
+#ifdef __ARMEB__
+ struct sk_buff *temp;
+ u32 phys;
+#endif
+
+ if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: hss_hdlc_poll"
+ " netif_rx_complete\n", dev->name);
+#endif
+ netif_rx_complete(dev, napi);
+ qmgr_enable_irq(rxq);
+ if (!qmgr_stat_empty(rxq) &&
+ netif_rx_reschedule(dev, napi)) {
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: hss_hdlc_poll"
+ " netif_rx_reschedule succeeded\n",
+ dev->name);
+#endif
+ qmgr_disable_irq(rxq);
+ continue;
+ }
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
+ dev->name);
+#endif
+ return received; /* all work done */
+ }
+
+ desc = rx_desc_ptr(port, n);
+#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
+ if (desc->error_count)
+ printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
+ " errors %u\n", dev->name, desc->status,
+ desc->error_count);
+#endif
+ skb = NULL;
+ switch (desc->status) {
+ case 0:
+#ifdef __ARMEB__
+ if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
+ phys = dma_map_single(&dev->dev, skb->data,
+ RX_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dev->dev, phys)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+#else
+ skb = netdev_alloc_skb(dev, desc->pkt_len);
+#endif
+ if (!skb)
+ dev->stats.rx_dropped++;
+ break;
+ case ERR_HDLC_ALIGN:
+ case ERR_HDLC_ABORT:
+ dev->stats.rx_frame_errors++;
+ dev->stats.rx_errors++;
+ break;
+ case ERR_HDLC_FCS:
+ dev->stats.rx_crc_errors++;
+ dev->stats.rx_errors++;
+ break;
+ case ERR_HDLC_TOO_LONG:
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ break;
+ default: /* FIXME - remove printk */
+ printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
+ " errors %u\n", dev->name, desc->status,
+ desc->error_count);
+ dev->stats.rx_errors++;
+ }
+
+ if (!skb) {
+ /* put the desc back on RX-ready queue */
+ desc->buf_len = RX_SIZE;
+ desc->pkt_len = desc->status = 0;
+ queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+ continue;
+ }
+
+ /* process received frame */
+#ifdef __ARMEB__
+ temp = skb;
+ skb = port->rx_buff_tab[n];
+ dma_unmap_single(&dev->dev, desc->data,
+ RX_SIZE, DMA_FROM_DEVICE);
+#else
+ dma_sync_single(&dev->dev, desc->data,
+ RX_SIZE, DMA_FROM_DEVICE);
+ memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
+ ALIGN(desc->pkt_len, 4) / 4);
+#endif
+ skb_put(skb, desc->pkt_len);
+
+ debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);
+
+ skb->protocol = hdlc_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ netif_receive_skb(skb);
+
+ /* put the new buffer on RX-free queue */
+#ifdef __ARMEB__
+ port->rx_buff_tab[n] = temp;
+ desc->data = phys;
+#endif
+ desc->buf_len = RX_SIZE;
+ desc->pkt_len = 0;
+ queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+ received++;
+ }
+#if DEBUG_RX
+ printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
+#endif
+ return received; /* not all work done */
+}
+
+
+static void hss_hdlc_txdone_irq(void *pdev)
+{
+ struct net_device *dev = pdev;
+ struct port *port = dev_to_port(dev);
+ int n_desc;
+
+#if DEBUG_TX
+ printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
+#endif
+ while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
+ port, 1)) >= 0) {
+ struct desc *desc;
+ int start;
+
+ desc = tx_desc_ptr(port, n_desc);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += desc->pkt_len;
+
+ dma_unmap_tx(port, desc);
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
+ dev->name, port->tx_buff_tab[n_desc]);
+#endif
+ free_buffer_irq(port->tx_buff_tab[n_desc]);
+ port->tx_buff_tab[n_desc] = NULL;
+
+ start = qmgr_stat_empty(port->plat->txreadyq);
+ queue_put_desc(port->plat->txreadyq,
+ tx_desc_phys(port, n_desc), desc);
+ if (start) {
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
+ " ready\n", dev->name);
+#endif
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port *port = dev_to_port(dev);
+ unsigned int txreadyq = port->plat->txreadyq;
+ int len, offset, bytes, n;
+ void *mem;
+ u32 phys;
+ struct desc *desc;
+
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
+#endif
+
+ if (unlikely(skb->len > HDLC_MAX_MRU)) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);
+
+ len = skb->len;
+#ifdef __ARMEB__
+ offset = 0; /* no need to keep alignment */
+ bytes = len;
+ mem = skb->data;
+#else
+ offset = (int)skb->data & 3; /* keep 32-bit alignment */
+ bytes = ALIGN(offset + len, 4);
+ if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+ dev_kfree_skb(skb);
+#endif
+
+ phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->dev, phys)) {
+#ifdef __ARMEB__
+ dev_kfree_skb(skb);
+#else
+ kfree(mem);
+#endif
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ n = queue_get_desc(txreadyq, port, 1);
+ BUG_ON(n < 0);
+ desc = tx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+ port->tx_buff_tab[n] = skb;
+#else
+ port->tx_buff_tab[n] = mem;
+#endif
+ desc->data = phys + offset;
+ desc->buf_len = desc->pkt_len = len;
+
+ wmb();
+ queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
+ dev->trans_start = jiffies;
+
+ if (qmgr_stat_empty(txreadyq)) {
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
+#endif
+ netif_stop_queue(dev);
+ /* we could miss TX ready interrupt */
+ if (!qmgr_stat_empty(txreadyq)) {
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
+ dev->name);
+#endif
+ netif_wake_queue(dev);
+ }
+ }
+
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
+#endif
+ return NETDEV_TX_OK;
+}
+
+
+static int request_hdlc_queues(struct port *port)
+{
+ int err;
+
+ err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
+ "%s:RX-free", port->netdev->name);
+ if (err)
+ return err;
+
+ err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
+ "%s:RX", port->netdev->name);
+ if (err)
+ goto rel_rxfree;
+
+ err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
+ "%s:TX", port->netdev->name);
+ if (err)
+ goto rel_rx;
+
+ err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+ "%s:TX-ready", port->netdev->name);
+ if (err)
+ goto rel_tx;
+
+ err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
+ "%s:TX-done", port->netdev->name);
+ if (err)
+ goto rel_txready;
+ return 0;
+
+rel_txready:
+ qmgr_release_queue(port->plat->txreadyq);
+rel_tx:
+ qmgr_release_queue(queue_ids[port->id].tx);
+rel_rx:
+ qmgr_release_queue(queue_ids[port->id].rx);
+rel_rxfree:
+ qmgr_release_queue(queue_ids[port->id].rxfree);
+ printk(KERN_DEBUG "%s: unable to request hardware queues\n",
+ port->netdev->name);
+ return err;
+}
+
+static void release_hdlc_queues(struct port *port)
+{
+ qmgr_release_queue(queue_ids[port->id].rxfree);
+ qmgr_release_queue(queue_ids[port->id].rx);
+ qmgr_release_queue(queue_ids[port->id].txdone);
+ qmgr_release_queue(queue_ids[port->id].tx);
+ qmgr_release_queue(port->plat->txreadyq);
+}
+
+static int init_hdlc_queues(struct port *port)
+{
+ int i;
+
+ if (!ports_open)
+ if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
+ POOL_ALLOC_SIZE, 32, 0)))
+ return -ENOMEM;
+
+ if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ &port->desc_tab_phys)))
+ return -ENOMEM;
+ memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
+ memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
+ memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
+
+ /* Setup RX buffers */
+ for (i = 0; i < RX_DESCS; i++) {
+ struct desc *desc = rx_desc_ptr(port, i);
+ buffer_t *buff;
+ void *data;
+#ifdef __ARMEB__
+ if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
+ return -ENOMEM;
+ data = buff->data;
+#else
+ if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
+ return -ENOMEM;
+ data = buff;
+#endif
+ desc->buf_len = RX_SIZE;
+ desc->data = dma_map_single(&port->netdev->dev, data,
+ RX_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&port->netdev->dev, desc->data)) {
+ free_buffer(buff);
+ return -EIO;
+ }
+ port->rx_buff_tab[i] = buff;
+ }
+
+ return 0;
+}
+
+static void destroy_hdlc_queues(struct port *port)
+{
+ int i;
+
+ if (port->desc_tab) {
+ for (i = 0; i < RX_DESCS; i++) {
+ struct desc *desc = rx_desc_ptr(port, i);
+ buffer_t *buff = port->rx_buff_tab[i];
+ if (buff) {
+ dma_unmap_single(&port->netdev->dev,
+ desc->data, RX_SIZE,
+ DMA_FROM_DEVICE);
+ free_buffer(buff);
+ }
+ }
+ for (i = 0; i < TX_DESCS; i++) {
+ struct desc *desc = tx_desc_ptr(port, i);
+ buffer_t *buff = port->tx_buff_tab[i];
+ if (buff) {
+ dma_unmap_tx(port, desc);
+ free_buffer(buff);
+ }
+ }
+ dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
+ port->desc_tab = NULL;
+ }
+
+ if (!ports_open && dma_pool) {
+ dma_pool_destroy(dma_pool);
+ dma_pool = NULL;
+ }
+}
+
+static int hss_hdlc_open(struct net_device *dev)
+{
+ struct port *port = dev_to_port(dev);
+ unsigned long flags;
+ int i, err = 0;
+
+ if ((err = hdlc_open(dev)))
+ return err;
+
+ if ((err = hss_load_firmware(port)))
+ goto err_hdlc_close;
+
+ if ((err = request_hdlc_queues(port)))
+ goto err_hdlc_close;
+
+ if ((err = init_hdlc_queues(port)))
+ goto err_destroy_queues;
+
+ spin_lock_irqsave(&npe_lock, flags);
+ if (port->plat->open)
+ if ((err = port->plat->open(port->id, dev,
+ hss_hdlc_set_carrier)))
+ goto err_unlock;
+ spin_unlock_irqrestore(&npe_lock, flags);
+
+ /* Populate queues with buffers, no failure after this point */
+ for (i = 0; i < TX_DESCS; i++)
+ queue_put_desc(port->plat->txreadyq,
+ tx_desc_phys(port, i), tx_desc_ptr(port, i));
+
+ for (i = 0; i < RX_DESCS; i++)
+ queue_put_desc(queue_ids[port->id].rxfree,
+ rx_desc_phys(port, i), rx_desc_ptr(port, i));
+
+ napi_enable(&port->napi);
+ netif_start_queue(dev);
+
+ qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
+ hss_hdlc_rx_irq, dev);
+
+ qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
+ hss_hdlc_txdone_irq, dev);
+ qmgr_enable_irq(queue_ids[port->id].txdone);
+
+ ports_open++;
+
+ hss_set_hdlc_cfg(port);
+ hss_config(port);
+
+ hss_start_hdlc(port);
+
+ /* we may already have RX data, enables IRQ */
+ netif_rx_schedule(dev, &port->napi);
+ return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&npe_lock, flags);
+err_destroy_queues:
+ destroy_hdlc_queues(port);
+ release_hdlc_queues(port);
+err_hdlc_close:
+ hdlc_close(dev);
+ return err;
+}
+
+static int hss_hdlc_close(struct net_device *dev)
+{
+ struct port *port = dev_to_port(dev);
+ unsigned long flags;
+ int i, buffs = RX_DESCS; /* allocated RX buffers */
+
+ spin_lock_irqsave(&npe_lock, flags);
+ ports_open--;
+ qmgr_disable_irq(queue_ids[port->id].rx);
+ netif_stop_queue(dev);
+ napi_disable(&port->napi);
+
+ hss_stop_hdlc(port);
+
+ while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
+ buffs--;
+ while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
+ buffs--;
+
+ if (buffs)
+ printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
+ " left in NPE\n", dev->name, buffs);
+
+ buffs = TX_DESCS;
+ while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
+ buffs--; /* cancel TX */
+
+ i = 0;
+ do {
+ while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+ buffs--;
+ if (!buffs)
+ break;
+ } while (++i < MAX_CLOSE_WAIT);
+
+ if (buffs)
+ printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
+ "left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+ if (!buffs)
+ printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+#endif
+ qmgr_disable_irq(queue_ids[port->id].txdone);
+
+ if (port->plat->close)
+ port->plat->close(port->id, dev);
+ spin_unlock_irqrestore(&npe_lock, flags);
+
+ destroy_hdlc_queues(port);
+ release_hdlc_queues(port);
+ hdlc_close(dev);
+ return 0;
+}
+
+
+static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct port *port = dev_to_port(dev);
+
+ if (encoding != ENCODING_NRZ)
+ return -EINVAL;
+
+ switch (parity) {
+ case PARITY_CRC16_PR1_CCITT:
+ port->hdlc_cfg = 0;
+ return 0;
+
+ case PARITY_CRC32_PR1_CCITT:
+ port->hdlc_cfg = PKT_HDLC_CRC_32;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+
+static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ struct port *port = dev_to_port(dev);
+ unsigned long flags;
+ int clk;
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_V35;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ memset(&new_line, 0, sizeof(new_line));
+ new_line.clock_type = port->clock_type;
+ new_line.clock_rate = 2048000; /* FIXME */
+ new_line.loopback = port->loopback;
+ if (copy_to_user(line, &new_line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL:
+ case IF_IFACE_V35:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ clk = new_line.clock_type;
+ if (port->plat->set_clock)
+ clk = port->plat->set_clock(port->id, clk);
+
+ if (clk != CLOCK_EXT && clk != CLOCK_INT)
+ return -EINVAL; /* No such clock setting */
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ port->clock_type = clk; /* Update settings */
+ /* FIXME port->clock_rate = new_line.clock_rate */;
+ port->loopback = new_line.loopback;
+
+ spin_lock_irqsave(&npe_lock, flags);
+
+ if (dev->flags & IFF_UP)
+ hss_config(port);
+
+ if (port->loopback || port->carrier)
+ netif_carrier_on(port->netdev);
+ else
+ netif_carrier_off(port->netdev);
+ spin_unlock_irqrestore(&npe_lock, flags);
+
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+/*****************************************************************************
+ * initialization
+ ****************************************************************************/
+
+static int __devinit hss_init_one(struct platform_device *pdev)
+{
+ struct port *port;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+ int err;
+
+ if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ if ((port->npe = npe_request(0)) == NULL) {
+ err = -ENOSYS;
+ goto err_free;
+ }
+
+ if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
+ err = -ENOMEM;
+ goto err_plat;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hss_hdlc_attach;
+ hdlc->xmit = hss_hdlc_xmit;
+ dev->open = hss_hdlc_open;
+ dev->stop = hss_hdlc_close;
+ dev->do_ioctl = hss_hdlc_ioctl;
+ dev->tx_queue_len = 100;
+ port->clock_type = CLOCK_EXT;
+ port->clock_rate = 2048000;
+ port->id = pdev->id;
+ port->dev = &pdev->dev;
+ port->plat = pdev->dev.platform_data;
+ netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
+
+ if ((err = register_hdlc_device(dev)))
+ goto err_free_netdev;
+
+ platform_set_drvdata(pdev, port);
+
+ printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
+ return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+err_plat:
+ npe_release(port->npe);
+err_free:
+ kfree(port);
+ return err;
+}
+
+static int __devexit hss_remove_one(struct platform_device *pdev)
+{
+ struct port *port = platform_get_drvdata(pdev);
+
+ unregister_hdlc_device(port->netdev);
+ free_netdev(port->netdev);
+ npe_release(port->npe);
+ platform_set_drvdata(pdev, NULL);
+ kfree(port);
+ return 0;
+}
+
+static struct platform_driver ixp4xx_hss_driver = {
+ .driver.name = DRV_NAME,
+ .probe = hss_init_one,
+ .remove = hss_remove_one,
+};
+
+static int __init hss_init_module(void)
+{
+ if ((ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
+ (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
+ return -ENOSYS;
+
+ spin_lock_init(&npe_lock);
+
+ return platform_driver_register(&ixp4xx_hss_driver);
+}
+
+static void __exit hss_cleanup_module(void)
+{
+ platform_driver_unregister(&ixp4xx_hss_driver);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ixp4xx_hss");
+module_init(hss_init_module);
+module_exit(hss_cleanup_module);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 24fd613466b..5b61b3eef45 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -143,7 +143,6 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
*ptr = 0x00;
skb->protocol = x25_type_trans(skb, dev);
- skb->dev->last_rx = jiffies;
return netif_rx(skb);
}
@@ -235,7 +234,6 @@ static void lapbeth_connected(struct net_device *dev, int reason)
*ptr = 0x01;
skb->protocol = x25_type_trans(skb, dev);
- skb->dev->last_rx = jiffies;
netif_rx(skb);
}
@@ -253,7 +251,6 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
*ptr = 0x02;
skb->protocol = x25_type_trans(skb, dev);
- skb->dev->last_rx = jiffies;
netif_rx(skb);
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index d7bb63e616b..feac3b99f8f 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1594,7 +1594,6 @@ static int lmc_rx(struct net_device *dev)
goto skip_packet;
}
- dev->last_rx = jiffies;
sc->lmc_device->stats.rx_packets++;
sc->lmc_device->stats.rx_bytes += len;
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index be9877ff551..94b4c208b01 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -142,7 +142,6 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
case LMC_PPP:
case LMC_NET:
default:
- skb->dev->last_rx = jiffies;
netif_rx(skb);
break;
case LMC_RAW:
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 0a566b0daac..697715ae80f 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -53,7 +53,7 @@ static const char* devname = "RISCom/N2";
#define NEED_SCA_MSCI_INTR
#define MAX_TX_BUFFERS 10
-static char *hw = NULL; /* pointer to hw=xxx command line string */
+static char *hw; /* pointer to hw=xxx command line string */
/* RISCom/N2 Board Registers */
@@ -145,7 +145,6 @@ static card_t **new_card = &first_card;
&(card)->ports[port] : NULL)
-
static __inline__ u8 sca_get_page(card_t *card)
{
return inb(card->io + N2_PSR) & PSR_PAGEBITS;
@@ -159,9 +158,7 @@ static __inline__ void openwin(card_t *card, u8 page)
}
-
-#include "hd6457x.c"
-
+#include "hd64570.c"
static void n2_set_iface(port_t *port)
@@ -478,7 +475,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
n2_destroy_card(card);
return -ENOBUFS;
}
- sca_init_sync_port(port); /* Set up SCA memory */
+ sca_init_port(port); /* Set up SCA memory */
printk(KERN_INFO "%s: RISCom/N2 node %d\n",
dev->name, port->phy_node);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index d0a8d1e352a..c23fde0c034 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1769,7 +1769,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
static void cpc_tx_timeout(struct net_device *dev)
{
- pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
int ch = chan->channel;
@@ -1796,7 +1796,7 @@ static void cpc_tx_timeout(struct net_device *dev)
static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
- pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
int ch = chan->channel;
@@ -1874,7 +1874,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
static void cpc_net_rx(struct net_device *dev)
{
- pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
int ch = chan->channel;
@@ -2522,7 +2522,7 @@ static int cpc_change_mtu(struct net_device *dev, int new_mtu)
static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
pc300conf_t conf_aux;
@@ -2718,9 +2718,8 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
pc300patrntst.num_errors =
falc_pattern_test_error(card, ch);
- if (!arg
- || copy_to_user(arg, &pc300patrntst,
- sizeof (pc300patterntst_t)))
+ if (copy_to_user(arg, &pc300patrntst,
+ sizeof(pc300patterntst_t)))
return -EINVAL;
} else {
falc_pattern_test(card, ch, pc300patrntst.patrntst_on);
@@ -3058,7 +3057,7 @@ static int tx_config(pc300dev_t * d)
static int cpc_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
- pc300dev_t *d = (pc300dev_t *)dev->priv;
+ pc300dev_t *d = (pc300dev_t *)dev_to_hdlc(dev)->priv;
pc300ch_t *chan = (pc300ch_t *)d->chan;
pc300_t *card = (pc300_t *)chan->card;
pc300chconf_t *conf = (pc300chconf_t *)&chan->conf;
@@ -3138,7 +3137,7 @@ static void cpc_closech(pc300dev_t * d)
int cpc_open(struct net_device *dev)
{
- pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
struct ifreq ifr;
int result;
@@ -3166,7 +3165,7 @@ err_out:
static int cpc_close(struct net_device *dev)
{
- pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
unsigned long flags;
@@ -3347,7 +3346,7 @@ static void cpc_init_card(pc300_t * card)
d->line_on = 0;
d->line_off = 0;
- dev = alloc_hdlcdev(NULL);
+ dev = alloc_hdlcdev(d);
if (dev == NULL)
continue;
@@ -3372,7 +3371,6 @@ static void cpc_init_card(pc300_t * card)
dev->do_ioctl = cpc_ioctl;
if (register_hdlc_device(dev) == 0) {
- dev->priv = d; /* We need 'priv', hdlc doesn't */
printk("%s: Cyclades-PC300/", dev->name);
switch (card->hw.type) {
case PC300_TE:
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index bf1b0159042..f247e5d9002 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -1,7 +1,7 @@
/*
* Cyclades PC300 synchronous serial card driver for Linux
*
- * Copyright (C) 2000-2007 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 2000-2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -11,7 +11,7 @@
*
* Sources of information:
* Hitachi HD64572 SCA-II User's Manual
- * Cyclades PC300 Linux driver
+ * Original Cyclades PC300 Linux driver
*
* This driver currently supports only PC300/RSV (V.24/V.35) and
* PC300/X21 cards.
@@ -37,17 +37,11 @@
#include "hd64572.h"
-static const char* version = "Cyclades PC300 driver version: 1.17";
-static const char* devname = "PC300";
-
#undef DEBUG_PKT
#define DEBUG_RINGS
#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
-#define ALL_PAGES_ALWAYS_MAPPED
-#define NEED_DETECT_RAM
-#define NEED_SCA_MSCI_INTR
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
@@ -81,7 +75,8 @@ typedef struct {
typedef struct port_s {
- struct net_device *dev;
+ struct napi_struct napi;
+ struct net_device *netdev;
struct card_s *card;
spinlock_t lock; /* TX lock */
sync_serial_settings settings;
@@ -93,7 +88,7 @@ typedef struct port_s {
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
- u8 phy_node; /* physical port # - 0 or 1 */
+ u8 chan; /* physical port # - 0 or 1 */
}port_t;
@@ -114,21 +109,10 @@ typedef struct card_s {
}card_t;
-#define sca_in(reg, card) readb(card->scabase + (reg))
-#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
-#define sca_inw(reg, card) readw(card->scabase + (reg))
-#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
-#define sca_inl(reg, card) readl(card->scabase + (reg))
-#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
-
-#define port_to_card(port) (port->card)
-#define log_node(port) (port->phy_node)
-#define phy_node(port) (port->phy_node)
-#define winbase(card) (card->rambase)
#define get_port(card, port) ((port) < (card)->n_ports ? \
(&(card)->ports[port]) : (NULL))
-#include "hd6457x.c"
+#include "hd64572.c"
static void pc300_set_iface(port_t *port)
@@ -139,8 +123,8 @@ static void pc300_set_iface(port_t *port)
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
- sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
- port_to_card(port));
+ sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
+ port->card);
switch(port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
@@ -172,10 +156,10 @@ static void pc300_set_iface(port_t *port)
if (port->card->type == PC300_RSV) {
if (port->iface == IF_IFACE_V35)
writel(card->init_ctrl_value |
- PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
+ PC300_CHMEDIA_MASK(port->chan), init_ctrl);
else
writel(card->init_ctrl_value &
- ~PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
+ ~PC300_CHMEDIA_MASK(port->chan), init_ctrl);
}
}
@@ -280,10 +264,8 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
card_t *card = pci_get_drvdata(pdev);
for (i = 0; i < 2; i++)
- if (card->ports[i].card) {
- struct net_device *dev = port_to_dev(&card->ports[i]);
- unregister_hdlc_device(dev);
- }
+ if (card->ports[i].card)
+ unregister_hdlc_device(card->ports[i].netdev);
if (card->irq)
free_irq(card->irq, card);
@@ -298,10 +280,10 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- if (card->ports[0].dev)
- free_netdev(card->ports[0].dev);
- if (card->ports[1].dev)
- free_netdev(card->ports[1].dev);
+ if (card->ports[0].netdev)
+ free_netdev(card->ports[0].netdev);
+ if (card->ports[1].netdev)
+ free_netdev(card->ports[1].netdev);
kfree(card);
}
@@ -318,12 +300,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
u32 scaphys; /* SCA memory base */
u32 plxphys; /* PLX registers memory base */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(KERN_INFO "%s\n", version);
-#endif
-
i = pci_enable_device(pdev);
if (i)
return i;
@@ -343,27 +319,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, card);
- if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
- pdev->device == PCI_DEVICE_ID_PC300_TE_2)
- card->type = PC300_TE; /* not fully supported */
- else if (card->init_ctrl_value & PC300_CTYPE_MASK)
- card->type = PC300_X21;
- else
- card->type = PC300_RSV;
-
- if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
- pdev->device == PCI_DEVICE_ID_PC300_TE_1)
- card->n_ports = 1;
- else
- card->n_ports = 2;
-
- for (i = 0; i < card->n_ports; i++)
- if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
- printk(KERN_ERR "pc300: unable to allocate memory\n");
- pc300_pci_remove_one(pdev);
- return -ENOMEM;
- }
-
if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
@@ -372,14 +327,14 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
return -EFAULT;
}
- plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
+ plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
- scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
+ scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
- ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
- card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
+ ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
+ card->rambase = pci_ioremap_bar(pdev, 3);
if (card->plxbase == NULL ||
card->scabase == NULL ||
@@ -393,6 +348,27 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
+ if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
+ pdev->device == PCI_DEVICE_ID_PC300_TE_2)
+ card->type = PC300_TE; /* not fully supported */
+ else if (card->init_ctrl_value & PC300_CTYPE_MASK)
+ card->type = PC300_X21;
+ else
+ card->type = PC300_RSV;
+
+ if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
+ pdev->device == PCI_DEVICE_ID_PC300_TE_1)
+ card->n_ports = 1;
+ else
+ card->n_ports = 2;
+
+ for (i = 0; i < card->n_ports; i++)
+ if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
+ printk(KERN_ERR "pc300: unable to allocate memory\n");
+ pc300_pci_remove_one(pdev);
+ return -ENOMEM;
+ }
+
/* Reset PLX */
p = &card->plxbase->init_ctrl;
writel(card->init_ctrl_value | 0x40000000, p);
@@ -446,7 +422,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
writew(0x0041, &card->plxbase->intr_ctrl_stat);
/* Allocate IRQ */
- if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) {
+ if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
pdev->irq);
pc300_pci_remove_one(pdev);
@@ -463,9 +439,9 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
for (i = 0; i < card->n_ports; i++) {
port_t *port = &card->ports[i];
- struct net_device *dev = port_to_dev(port);
+ struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
- port->phy_node = i;
+ port->chan = i;
spin_lock_init(&port->lock);
dev->irq = card->irq;
@@ -484,6 +460,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
else
port->iface = IF_IFACE_V35;
+ sca_init_port(port);
if (register_hdlc_device(dev)) {
printk(KERN_ERR "pc300: unable to register hdlc "
"device\n");
@@ -491,10 +468,9 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
pc300_pci_remove_one(pdev);
return -ENOBUFS;
}
- sca_init_sync_port(port); /* Set up SCA memory */
- printk(KERN_INFO "%s: PC300 node %d\n",
- dev->name, port->phy_node);
+ printk(KERN_INFO "%s: PC300 channel %d\n",
+ dev->name, port->chan);
}
return 0;
}
@@ -524,9 +500,6 @@ static struct pci_driver pc300_pci_driver = {
static int __init pc300_init_module(void)
{
-#ifdef MODULE
- printk(KERN_INFO "%s\n", version);
-#endif
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
return -EINVAL;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index b595b64e753..1104d3a692f 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -1,7 +1,7 @@
/*
* Goramo PCI200SYN synchronous serial card driver for Linux
*
- * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -33,17 +33,11 @@
#include "hd64572.h"
-static const char* version = "Goramo PCI200SYN driver version: 1.16";
-static const char* devname = "PCI200SYN";
-
#undef DEBUG_PKT
#define DEBUG_RINGS
#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
-#define ALL_PAGES_ALWAYS_MAPPED
-#define NEED_DETECT_RAM
-#define NEED_SCA_MSCI_INTR
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
@@ -68,7 +62,8 @@ typedef struct {
typedef struct port_s {
- struct net_device *dev;
+ struct napi_struct napi;
+ struct net_device *netdev;
struct card_s *card;
spinlock_t lock; /* TX lock */
sync_serial_settings settings;
@@ -79,7 +74,7 @@ typedef struct port_s {
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
- u8 phy_node; /* physical port # - 0 or 1 */
+ u8 chan; /* physical port # - 0 or 1 */
}port_t;
@@ -97,17 +92,6 @@ typedef struct card_s {
}card_t;
-#define sca_in(reg, card) readb(card->scabase + (reg))
-#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
-#define sca_inw(reg, card) readw(card->scabase + (reg))
-#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
-#define sca_inl(reg, card) readl(card->scabase + (reg))
-#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
-
-#define port_to_card(port) (port->card)
-#define log_node(port) (port->phy_node)
-#define phy_node(port) (port->phy_node)
-#define winbase(card) (card->rambase)
#define get_port(card, port) (&card->ports[port])
#define sca_flush(card) (sca_in(IER0, card));
@@ -127,7 +111,7 @@ static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
#undef memcpy_toio
#define memcpy_toio new_memcpy_toio
-#include "hd6457x.c"
+#include "hd64572.c"
static void pci200_set_iface(port_t *port)
@@ -137,8 +121,8 @@ static void pci200_set_iface(port_t *port)
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
- sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
- port_to_card(port));
+ sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
+ port->card);
switch(port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
@@ -180,7 +164,7 @@ static int pci200_open(struct net_device *dev)
sca_open(dev);
pci200_set_iface(port);
- sca_flush(port_to_card(port));
+ sca_flush(port->card);
return 0;
}
@@ -189,7 +173,7 @@ static int pci200_open(struct net_device *dev)
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
- sca_flush(port_to_card(dev_to_port(dev)));
+ sca_flush(dev_to_port(dev)->card);
hdlc_close(dev);
return 0;
}
@@ -242,7 +226,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
memcpy(&port->settings, &new_line, size); /* Update settings */
pci200_set_iface(port);
- sca_flush(port_to_card(port));
+ sca_flush(port->card);
return 0;
default:
@@ -258,10 +242,8 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
card_t *card = pci_get_drvdata(pdev);
for (i = 0; i < 2; i++)
- if (card->ports[i].card) {
- struct net_device *dev = port_to_dev(&card->ports[i]);
- unregister_hdlc_device(dev);
- }
+ if (card->ports[i].card)
+ unregister_hdlc_device(card->ports[i].netdev);
if (card->irq)
free_irq(card->irq, card);
@@ -276,10 +258,10 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- if (card->ports[0].dev)
- free_netdev(card->ports[0].dev);
- if (card->ports[1].dev)
- free_netdev(card->ports[1].dev);
+ if (card->ports[0].netdev)
+ free_netdev(card->ports[0].netdev);
+ if (card->ports[1].netdev)
+ free_netdev(card->ports[1].netdev);
kfree(card);
}
@@ -296,12 +278,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
u32 scaphys; /* SCA memory base */
u32 plxphys; /* PLX registers memory base */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(KERN_INFO "%s\n", version);
-#endif
-
i = pci_enable_device(pdev);
if (i)
return i;
@@ -320,9 +296,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
return -ENOBUFS;
}
pci_set_drvdata(pdev, card);
- card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
- card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
- if (!card->ports[0].dev || !card->ports[1].dev) {
+ card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
+ card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
+ if (!card->ports[0].netdev || !card->ports[1].netdev) {
printk(KERN_ERR "pci200syn: unable to allocate memory\n");
pci200_pci_remove_one(pdev);
return -ENOMEM;
@@ -343,7 +319,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
- card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
+ card->rambase = pci_ioremap_bar(pdev, 3);
if (card->plxbase == NULL ||
card->scabase == NULL ||
@@ -398,7 +374,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
writew(readw(p) | 0x0040, p);
/* Allocate IRQ */
- if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) {
+ if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
pdev->irq);
pci200_pci_remove_one(pdev);
@@ -410,9 +386,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
for (i = 0; i < 2; i++) {
port_t *port = &card->ports[i];
- struct net_device *dev = port_to_dev(port);
+ struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
- port->phy_node = i;
+ port->chan = i;
spin_lock_init(&port->lock);
dev->irq = card->irq;
@@ -426,6 +402,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
+ sca_init_port(port);
if (register_hdlc_device(dev)) {
printk(KERN_ERR "pci200syn: unable to register hdlc "
"device\n");
@@ -433,10 +410,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
pci200_pci_remove_one(pdev);
return -ENOBUFS;
}
- sca_init_sync_port(port); /* Set up SCA memory */
- printk(KERN_INFO "%s: PCI200SYN node %d\n",
- dev->name, port->phy_node);
+ printk(KERN_INFO "%s: PCI200SYN channel %d\n",
+ dev->name, port->chan);
}
sca_flush(card);
@@ -464,9 +440,6 @@ static struct pci_driver pci200_pci_driver = {
static int __init pci200_init_module(void)
{
-#ifdef MODULE
- printk(KERN_INFO "%s\n", version);
-#endif
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
return -EINVAL;
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index ee51b6a5e60..0aa28e1d436 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -186,6 +186,7 @@ static unsigned int netcard_portlist[ ] __initdata = {
0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
0 };
+#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
/*
* Look for SBNI card which addr stored in dev->base_addr, if nonzero.
@@ -287,7 +288,7 @@ static int __init sbni_init(struct net_device *dev)
}
-int __init
+static int __init
sbni_pci_probe( struct net_device *dev )
{
struct pci_dev *pdev = NULL;
@@ -378,22 +379,23 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
dev->irq = irq;
dev->base_addr = ioaddr;
- /* Allocate dev->priv and fill in sbni-specific dev fields. */
- nl = dev->priv;
+ /* Fill in sbni-specific dev fields. */
+ nl = netdev_priv(dev);
if( !nl ) {
printk( KERN_ERR "%s: unable to get memory!\n", dev->name );
release_region( ioaddr, SBNI_IO_EXTENT );
return NULL;
}
- dev->priv = nl;
memset( nl, 0, sizeof(struct net_local) );
spin_lock_init( &nl->lock );
/* store MAC address (generate if that isn't known) */
*(__be16 *)dev->dev_addr = htons( 0x00ff );
*(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
- ( (mac[num] ? mac[num] : (u32)((long)dev->priv)) & 0x00ffffff) );
+ ((mac[num] ?
+ mac[num] :
+ (u32)((long)netdev_priv(dev))) & 0x00ffffff));
/* store link settings (speed, receive level ) */
nl->maxframe = DEFAULT_FRAME_LEN;
@@ -447,7 +449,7 @@ sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Looking for idle device in the list */
for( p = dev; p; ) {
- struct net_local *nl = (struct net_local *) p->priv;
+ struct net_local *nl = netdev_priv(p);
spin_lock( &nl->lock );
if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
p = nl->link;
@@ -469,7 +471,7 @@ sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
static int
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
netif_stop_queue( dev );
spin_lock( &nl->lock );
@@ -503,12 +505,12 @@ static irqreturn_t
sbni_interrupt( int irq, void *dev_id )
{
struct net_device *dev = dev_id;
- struct net_local *nl = dev->priv;
+ struct net_local *nl = netdev_priv(dev);
int repeat;
spin_lock( &nl->lock );
if( nl->second )
- spin_lock( &((struct net_local *) nl->second->priv)->lock );
+ spin_lock(&NET_LOCAL_LOCK(nl->second));
do {
repeat = 0;
@@ -522,7 +524,7 @@ sbni_interrupt( int irq, void *dev_id )
} while( repeat );
if( nl->second )
- spin_unlock( &((struct net_local *)nl->second->priv)->lock );
+ spin_unlock(&NET_LOCAL_LOCK(nl->second));
spin_unlock( &nl->lock );
return IRQ_HANDLED;
}
@@ -531,7 +533,7 @@ sbni_interrupt( int irq, void *dev_id )
static void
handle_channel( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr;
int req_ans;
@@ -540,7 +542,7 @@ handle_channel( struct net_device *dev )
#ifdef CONFIG_SBNI_MULTILINE
/* Lock the master device because we going to change its local data */
if( nl->state & FL_SLAVE )
- spin_lock( &((struct net_local *) nl->master->priv)->lock );
+ spin_lock(&NET_LOCAL_LOCK(nl->master));
#endif
outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
@@ -576,7 +578,7 @@ handle_channel( struct net_device *dev )
#ifdef CONFIG_SBNI_MULTILINE
if( nl->state & FL_SLAVE )
- spin_unlock( &((struct net_local *) nl->master->priv)->lock );
+ spin_unlock(&NET_LOCAL_LOCK(nl->master));
#endif
}
@@ -589,7 +591,7 @@ handle_channel( struct net_device *dev )
static int
recv_frame( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr;
u32 crc = CRC32_INITIAL;
@@ -623,7 +625,7 @@ recv_frame( struct net_device *dev )
static void
send_frame( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
u32 crc = CRC32_INITIAL;
@@ -680,7 +682,7 @@ do_send:
static void
download_data( struct net_device *dev, u32 *crc_p )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
struct sk_buff *skb = nl->tx_buf_p;
unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
@@ -699,7 +701,7 @@ static int
upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
unsigned is_first, u32 crc )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
int frame_ok;
@@ -721,9 +723,9 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
nl->wait_frameno = 0,
nl->inppos = 0,
#ifdef CONFIG_SBNI_MULTILINE
- ((struct net_local *) nl->master->priv)
+ ((struct net_local *)netdev_priv(nl->master))
->stats.rx_errors++,
- ((struct net_local *) nl->master->priv)
+ ((struct net_local *)netdev_priv(nl->master))
->stats.rx_missed_errors++;
#else
nl->stats.rx_errors++,
@@ -740,8 +742,10 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
*/
nl->wait_frameno = 0,
#ifdef CONFIG_SBNI_MULTILINE
- ((struct net_local *) nl->master->priv)->stats.rx_errors++,
- ((struct net_local *) nl->master->priv)->stats.rx_crc_errors++;
+ ((struct net_local *)netdev_priv(nl->master))
+ ->stats.rx_errors++,
+ ((struct net_local *)netdev_priv(nl->master))
+ ->stats.rx_crc_errors++;
#else
nl->stats.rx_errors++,
nl->stats.rx_crc_errors++;
@@ -755,8 +759,8 @@ static inline void
send_complete( struct net_local *nl )
{
#ifdef CONFIG_SBNI_MULTILINE
- ((struct net_local *) nl->master->priv)->stats.tx_packets++;
- ((struct net_local *) nl->master->priv)->stats.tx_bytes
+ ((struct net_local *)netdev_priv(nl->master))->stats.tx_packets++;
+ ((struct net_local *)netdev_priv(nl->master))->stats.tx_bytes
+= nl->tx_buf_p->len;
#else
nl->stats.tx_packets++;
@@ -775,7 +779,7 @@ send_complete( struct net_local *nl )
static void
interpret_ack( struct net_device *dev, unsigned ack )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
if( ack == FRAME_SENT_OK ) {
nl->state &= ~FL_NEED_RESEND;
@@ -809,7 +813,7 @@ interpret_ack( struct net_device *dev, unsigned ack )
static int
append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
u8 *p;
@@ -840,7 +844,7 @@ append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
static void
prepare_to_send( struct sk_buff *skb, struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
unsigned int len;
@@ -871,15 +875,15 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
static void
drop_xmit_queue( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
if( nl->tx_buf_p )
dev_kfree_skb_any( nl->tx_buf_p ),
nl->tx_buf_p = NULL,
#ifdef CONFIG_SBNI_MULTILINE
- ((struct net_local *) nl->master->priv)
+ ((struct net_local *)netdev_priv(nl->master))
->stats.tx_errors++,
- ((struct net_local *) nl->master->priv)
+ ((struct net_local *)netdev_priv(nl->master))
->stats.tx_carrier_errors++;
#else
nl->stats.tx_errors++,
@@ -903,7 +907,7 @@ drop_xmit_queue( struct net_device *dev )
static void
send_frame_header( struct net_device *dev, u32 *crc_p )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
u32 crc = *crc_p;
u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
@@ -1005,7 +1009,7 @@ get_rx_buf( struct net_device *dev )
static void
indicate_pkt( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
struct sk_buff *skb = nl->rx_buf_p;
skb_put( skb, nl->inppos );
@@ -1013,13 +1017,12 @@ indicate_pkt( struct net_device *dev )
#ifdef CONFIG_SBNI_MULTILINE
skb->protocol = eth_type_trans( skb, nl->master );
netif_rx( skb );
- dev->last_rx = jiffies;
- ++((struct net_local *) nl->master->priv)->stats.rx_packets;
- ((struct net_local *) nl->master->priv)->stats.rx_bytes += nl->inppos;
+ ++((struct net_local *)netdev_priv(nl->master))->stats.rx_packets;
+ ((struct net_local *)netdev_priv(nl->master))->stats.rx_bytes +=
+ nl->inppos;
#else
skb->protocol = eth_type_trans( skb, dev );
netif_rx( skb );
- dev->last_rx = jiffies;
++nl->stats.rx_packets;
nl->stats.rx_bytes += nl->inppos;
#endif
@@ -1038,7 +1041,7 @@ static void
sbni_watchdog( unsigned long arg )
{
struct net_device *dev = (struct net_device *) arg;
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
struct timer_list *w = &nl->watchdog;
unsigned long flags;
unsigned char csr0;
@@ -1091,7 +1094,7 @@ static unsigned char timeout_rxl_tab[] = {
static void
card_start( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
@@ -1113,7 +1116,7 @@ card_start( struct net_device *dev )
static void
change_level( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
return;
@@ -1137,7 +1140,7 @@ change_level( struct net_device *dev )
static void
timeout_change_level( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
if( ++nl->timeout_rxl >= 4 )
@@ -1160,7 +1163,7 @@ timeout_change_level( struct net_device *dev )
static int
sbni_open( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
struct timer_list *w = &nl->watchdog;
/*
@@ -1176,7 +1179,7 @@ sbni_open( struct net_device *dev )
|| (*p)->base_addr == dev->base_addr - 4)
&& (*p)->flags & IFF_UP ) {
- ((struct net_local *) ((*p)->priv))
+ ((struct net_local *) (netdev_priv(*p)))
->second = dev;
printk( KERN_NOTICE "%s: using shared irq "
"with %s\n", dev->name, (*p)->name );
@@ -1216,7 +1219,7 @@ handler_attached:
static int
sbni_close( struct net_device *dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
if( nl->second && nl->second->flags & IFF_UP ) {
printk( KERN_NOTICE "Secondary channel (%s) is active!\n",
@@ -1300,7 +1303,7 @@ sbni_card_probe( unsigned long ioaddr )
static int
sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
{
- struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *nl = netdev_priv(dev);
struct sbni_flags flags;
int error = 0;
@@ -1390,8 +1393,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
static int
enslave( struct net_device *dev, struct net_device *slave_dev )
{
- struct net_local *nl = (struct net_local *) dev->priv;
- struct net_local *snl = (struct net_local *) slave_dev->priv;
+ struct net_local *nl = netdev_priv(dev);
+ struct net_local *snl = netdev_priv(slave_dev);
if( nl->state & FL_SLAVE ) /* This isn't master or free device */
return -EBUSY;
@@ -1425,9 +1428,9 @@ enslave( struct net_device *dev, struct net_device *slave_dev )
static int
emancipate( struct net_device *dev )
{
- struct net_local *snl = (struct net_local *) dev->priv;
+ struct net_local *snl = netdev_priv(dev);
struct net_device *p = snl->master;
- struct net_local *nl = (struct net_local *) p->priv;
+ struct net_local *nl = netdev_priv(p);
if( !(snl->state & FL_SLAVE) )
return -EINVAL;
@@ -1438,7 +1441,7 @@ emancipate( struct net_device *dev )
/* exclude from list */
for(;;) { /* must be in list */
- struct net_local *t = (struct net_local *) p->priv;
+ struct net_local *t = netdev_priv(p);
if( t->link == dev ) {
t->link = snl->link;
break;
@@ -1465,7 +1468,7 @@ emancipate( struct net_device *dev )
static struct net_device_stats *
sbni_get_stats( struct net_device *dev )
{
- return &((struct net_local *) dev->priv)->stats;
+ return &((struct net_local *)netdev_priv(dev))->stats;
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 73e2f278093..6a07ba9371d 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -185,7 +185,7 @@ static void sdla_stop(struct net_device *dev)
{
struct frad_local *flp;
- flp = dev->priv;
+ flp = netdev_priv(dev);
switch(flp->type)
{
case SDLA_S502A:
@@ -212,7 +212,7 @@ static void sdla_start(struct net_device *dev)
{
struct frad_local *flp;
- flp = dev->priv;
+ flp = netdev_priv(dev);
switch(flp->type)
{
case SDLA_S502A:
@@ -432,7 +432,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
int ret, waiting, len;
long window;
- flp = dev->priv;
+ flp = netdev_priv(dev);
window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
ret = 0;
@@ -509,7 +509,7 @@ static int sdla_activate(struct net_device *slave, struct net_device *master)
struct frad_local *flp;
int i;
- flp = slave->priv;
+ flp = netdev_priv(slave);
for(i=0;i<CONFIG_DLCI_MAX;i++)
if (flp->master[i] == master)
@@ -531,7 +531,7 @@ static int sdla_deactivate(struct net_device *slave, struct net_device *master)
struct frad_local *flp;
int i;
- flp = slave->priv;
+ flp = netdev_priv(slave);
for(i=0;i<CONFIG_DLCI_MAX;i++)
if (flp->master[i] == master)
@@ -556,7 +556,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
if (master->type != ARPHRD_DLCI)
return(-EINVAL);
- flp = slave->priv;
+ flp = netdev_priv(slave);
for(i=0;i<CONFIG_DLCI_MAX;i++)
{
@@ -589,7 +589,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
struct frad_local *flp;
int i;
- flp = slave->priv;
+ flp = netdev_priv(slave);
for(i=0;i<CONFIG_DLCI_MAX;i++)
if (flp->master[i] == master)
@@ -619,7 +619,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
int i;
short len, ret;
- flp = slave->priv;
+ flp = netdev_priv(slave);
for(i=0;i<CONFIG_DLCI_MAX;i++)
if (flp->master[i] == master)
@@ -628,7 +628,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
if (i == CONFIG_DLCI_MAX)
return(-ENODEV);
- dlp = master->priv;
+ dlp = netdev_priv(master);
ret = SDLA_RET_OK;
len = sizeof(struct dlci_conf);
@@ -659,7 +659,7 @@ static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
struct buf_entry *pbuf;
- flp = dev->priv;
+ flp = netdev_priv(dev);
ret = 0;
accept = 1;
@@ -755,7 +755,7 @@ static void sdla_receive(struct net_device *dev)
int i=0, received, success, addr, buf_base, buf_top;
short dlci, len, len2, split;
- flp = dev->priv;
+ flp = netdev_priv(dev);
success = 1;
received = addr = buf_top = buf_base = 0;
len = dlci = 0;
@@ -860,7 +860,7 @@ static void sdla_receive(struct net_device *dev)
if (success)
{
flp->stats.rx_packets++;
- dlp = master->priv;
+ dlp = netdev_priv(master);
(*dlp->receive)(skb, master);
}
@@ -925,7 +925,7 @@ static void sdla_poll(unsigned long device)
struct frad_local *flp;
dev = (struct net_device *) device;
- flp = dev->priv;
+ flp = netdev_priv(dev);
if (sdla_byte(dev, SDLA_502_RCV_BUF))
sdla_receive(dev);
@@ -941,7 +941,7 @@ static int sdla_close(struct net_device *dev)
int len, i;
short dlcis[CONFIG_DLCI_MAX];
- flp = dev->priv;
+ flp = netdev_priv(dev);
len = 0;
for(i=0;i<CONFIG_DLCI_MAX;i++)
@@ -1002,7 +1002,7 @@ static int sdla_open(struct net_device *dev)
int len, i;
char byte;
- flp = dev->priv;
+ flp = netdev_priv(dev);
if (!flp->initialized)
return(-EPERM);
@@ -1079,7 +1079,7 @@ static int sdla_open(struct net_device *dev)
for(i=0;i<CONFIG_DLCI_MAX;i++)
if (flp->dlci[i])
{
- dlp = flp->master[i]->priv;
+ dlp = netdev_priv(flp->master[i]);
if (dlp->configured)
sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
}
@@ -1099,7 +1099,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
if (dev->type == 0xFFFF)
return(-EUNATCH);
- flp = dev->priv;
+ flp = netdev_priv(dev);
if (!get)
{
@@ -1230,7 +1230,7 @@ static int sdla_reconfig(struct net_device *dev)
struct conf_data data;
int i, len;
- flp = dev->priv;
+ flp = netdev_priv(dev);
len = 0;
for(i=0;i<CONFIG_DLCI_MAX;i++)
@@ -1255,7 +1255,7 @@ static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if(!capable(CAP_NET_ADMIN))
return -EPERM;
- flp = dev->priv;
+ flp = netdev_priv(dev);
if (!flp->initialized)
return(-EINVAL);
@@ -1321,7 +1321,7 @@ static int sdla_change_mtu(struct net_device *dev, int new_mtu)
{
struct frad_local *flp;
- flp = dev->priv;
+ flp = netdev_priv(dev);
if (netif_running(dev))
return(-EBUSY);
@@ -1338,7 +1338,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
unsigned base;
int err = -EINVAL;
- flp = dev->priv;
+ flp = netdev_priv(dev);
if (flp->initialized)
return(-EINVAL);
@@ -1593,14 +1593,14 @@ fail:
static struct net_device_stats *sdla_stats(struct net_device *dev)
{
struct frad_local *flp;
- flp = dev->priv;
+ flp = netdev_priv(dev);
return(&flp->stats);
}
static void setup_sdla(struct net_device *dev)
{
- struct frad_local *flp = dev->priv;
+ struct frad_local *flp = netdev_priv(dev);
netdev_boot_setup_check(dev);
@@ -1651,7 +1651,7 @@ static int __init init_sdla(void)
static void __exit exit_sdla(void)
{
- struct frad_local *flp = sdla->priv;
+ struct frad_local *flp = netdev_priv(sdla);
unregister_netdev(sdla);
if (flp->initialized) {
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index c0235844a4d..0941a26f6e3 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -68,7 +68,6 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
skb_reset_mac_header(skb);
skb->dev = c->netdevice;
netif_rx(skb);
- c->netdevice->last_rx = jiffies;
}
/*
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
deleted file mode 100644
index 6e92f7b44b1..00000000000
--- a/drivers/net/wan/syncppp.c
+++ /dev/null
@@ -1,1480 +0,0 @@
-/*
- * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
- * as well as a CISCO HDLC implementation. See the copyright
- * message below for the original source.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the license, or (at your option) any later version.
- *
- * Note however. This code is also used in a different form by FreeBSD.
- * Therefore when making any non OS specific change please consider
- * contributing it back to the original author under the terms
- * below in addition.
- * -- Alan
- *
- * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- */
-
-/*
- * Synchronous PPP/Cisco link level subroutines.
- * Keepalive protocol implemented in both Cisco and PPP modes.
- *
- * Copyright (C) 1994 Cronyx Ltd.
- * Author: Serge Vakulenko, <vak@zebub.msk.su>
- *
- * This software is distributed with NO WARRANTIES, not even the implied
- * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Authors grant any other persons or organisations permission to use
- * or modify this software as long as this message is kept with the software,
- * all derivative works or modified versions.
- *
- * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
- *
- * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
- */
-#undef DEBUG
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/if_arp.h>
-#include <linux/skbuff.h>
-#include <linux/route.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/random.h>
-#include <linux/pkt_sched.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-
-#include <net/net_namespace.h>
-#include <net/syncppp.h>
-
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-
-#define MAXALIVECNT 6 /* max. alive packets */
-
-#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
-#define PPP_UI 0x03 /* Unnumbered Information */
-#define PPP_IP 0x0021 /* Internet Protocol */
-#define PPP_ISO 0x0023 /* ISO OSI Protocol */
-#define PPP_XNS 0x0025 /* Xerox NS Protocol */
-#define PPP_IPX 0x002b /* Novell IPX Protocol */
-#define PPP_LCP 0xc021 /* Link Control Protocol */
-#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
-
-#define LCP_CONF_REQ 1 /* PPP LCP configure request */
-#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
-#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
-#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
-#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
-#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
-#define LCP_CODE_REJ 7 /* PPP LCP code reject */
-#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
-#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
-#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
-#define LCP_DISC_REQ 11 /* PPP LCP discard request */
-
-#define LCP_OPT_MRU 1 /* maximum receive unit */
-#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
-#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
-#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
-#define LCP_OPT_MAGIC 5 /* magic number */
-#define LCP_OPT_RESERVED 6 /* reserved */
-#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
-#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
-
-#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
-#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
-#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
-#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
-#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
-#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
-#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
-
-#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
-#define CISCO_UNICAST 0x0f /* Cisco unicast address */
-#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
-#define CISCO_ADDR_REQ 0 /* Cisco address request */
-#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
-#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
-
-struct ppp_header {
- u8 address;
- u8 control;
- __be16 protocol;
-};
-#define PPP_HEADER_LEN sizeof (struct ppp_header)
-
-struct lcp_header {
- u8 type;
- u8 ident;
- __be16 len;
-};
-#define LCP_HEADER_LEN sizeof (struct lcp_header)
-
-struct cisco_packet {
- __be32 type;
- __be32 par1;
- __be32 par2;
- __be16 rel;
- __be16 time0;
- __be16 time1;
-};
-#define CISCO_PACKET_LEN 18
-#define CISCO_BIG_PACKET_LEN 20
-
-static struct sppp *spppq;
-static struct timer_list sppp_keepalive_timer;
-static DEFINE_SPINLOCK(spppq_lock);
-
-/* global xmit queue for sending packets while spinlock is held */
-static struct sk_buff_head tx_queue;
-
-static void sppp_keepalive (unsigned long dummy);
-static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
- u8 ident, u16 len, void *data);
-static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2);
-static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
-static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
-static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
-static void sppp_lcp_open (struct sppp *sp);
-static void sppp_ipcp_open (struct sppp *sp);
-static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
- int len, u32 *magic);
-static void sppp_cp_timeout (unsigned long arg);
-static char *sppp_lcp_type_name (u8 type);
-static char *sppp_ipcp_type_name (u8 type);
-static void sppp_print_bytes (u8 *p, u16 len);
-
-static int debug;
-
-/* Flush global outgoing packet queue to dev_queue_xmit().
- *
- * dev_queue_xmit() must be called with interrupts enabled
- * which means it can't be called with spinlocks held.
- * If a packet needs to be sent while a spinlock is held,
- * then put the packet into tx_queue, and call sppp_flush_xmit()
- * after spinlock is released.
- */
-static void sppp_flush_xmit(void)
-{
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&tx_queue)) != NULL)
- dev_queue_xmit(skb);
-}
-
-/*
- * Interface down stub
- */
-
-static void if_down(struct net_device *dev)
-{
- struct sppp *sp = (struct sppp *)sppp_of(dev);
-
- sp->pp_link_state=SPPP_LINK_DOWN;
-}
-
-/*
- * Timeout routine activations.
- */
-
-static void sppp_set_timeout(struct sppp *p,int s)
-{
- if (! (p->pp_flags & PP_TIMO))
- {
- init_timer(&p->pp_timer);
- p->pp_timer.function=sppp_cp_timeout;
- p->pp_timer.expires=jiffies+s*HZ;
- p->pp_timer.data=(unsigned long)p;
- p->pp_flags |= PP_TIMO;
- add_timer(&p->pp_timer);
- }
-}
-
-static void sppp_clear_timeout(struct sppp *p)
-{
- if (p->pp_flags & PP_TIMO)
- {
- del_timer(&p->pp_timer);
- p->pp_flags &= ~PP_TIMO;
- }
-}
-
-/**
- * sppp_input - receive and process a WAN PPP frame
- * @skb: The buffer to process
- * @dev: The device it arrived on
- *
- * This can be called directly by cards that do not have
- * timing constraints but is normally called from the network layer
- * after interrupt servicing to process frames queued via netif_rx().
- *
- * We process the options in the card. If the frame is destined for
- * the protocol stacks then it requeues the frame for the upper level
- * protocol. If it is a control frame it is processed and discarded
- * here.
- */
-
-static void sppp_input (struct net_device *dev, struct sk_buff *skb)
-{
- struct ppp_header *h;
- struct sppp *sp = (struct sppp *)sppp_of(dev);
- unsigned long flags;
-
- skb->dev=dev;
- skb_reset_mac_header(skb);
-
- if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
- /* Too small packet, drop it. */
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
- dev->name, skb->len);
- kfree_skb(skb);
- return;
- }
-
- /* Get PPP header. */
- h = (struct ppp_header *)skb->data;
- skb_pull(skb,sizeof(struct ppp_header));
-
- spin_lock_irqsave(&sp->lock, flags);
-
- switch (h->address) {
- default: /* Invalid PPP packet. */
- goto invalid;
- case PPP_ALLSTATIONS:
- if (h->control != PPP_UI)
- goto invalid;
- if (sp->pp_flags & PP_CISCO) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
- dev->name,
- h->address, h->control, ntohs (h->protocol));
- goto drop;
- }
- switch (ntohs (h->protocol)) {
- default:
- if (sp->lcp.state == LCP_STATE_OPENED)
- sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
- ++sp->pp_seq, skb->len + 2,
- &h->protocol);
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
- dev->name,
- h->address, h->control, ntohs (h->protocol));
- goto drop;
- case PPP_LCP:
- sppp_lcp_input (sp, skb);
- goto drop;
- case PPP_IPCP:
- if (sp->lcp.state == LCP_STATE_OPENED)
- sppp_ipcp_input (sp, skb);
- else
- printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
- goto drop;
- case PPP_IP:
- if (sp->ipcp.state == IPCP_STATE_OPENED) {
- if(sp->pp_flags&PP_DEBUG)
- printk(KERN_DEBUG "Yow an IP frame.\n");
- skb->protocol=htons(ETH_P_IP);
- netif_rx(skb);
- dev->last_rx = jiffies;
- goto done;
- }
- break;
-#ifdef IPX
- case PPP_IPX:
- /* IPX IPXCP not implemented yet */
- if (sp->lcp.state == LCP_STATE_OPENED) {
- skb->protocol=htons(ETH_P_IPX);
- netif_rx(skb);
- dev->last_rx = jiffies;
- goto done;
- }
- break;
-#endif
- }
- break;
- case CISCO_MULTICAST:
- case CISCO_UNICAST:
- /* Don't check the control field here (RFC 1547). */
- if (! (sp->pp_flags & PP_CISCO)) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
- dev->name,
- h->address, h->control, ntohs (h->protocol));
- goto drop;
- }
- switch (ntohs (h->protocol)) {
- default:
- goto invalid;
- case CISCO_KEEPALIVE:
- sppp_cisco_input (sp, skb);
- goto drop;
-#ifdef CONFIG_INET
- case ETH_P_IP:
- skb->protocol=htons(ETH_P_IP);
- netif_rx(skb);
- dev->last_rx = jiffies;
- goto done;
-#endif
-#ifdef CONFIG_IPX
- case ETH_P_IPX:
- skb->protocol=htons(ETH_P_IPX);
- netif_rx(skb);
- dev->last_rx = jiffies;
- goto done;
-#endif
- }
- break;
- }
- goto drop;
-
-invalid:
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
- dev->name, h->address, h->control, ntohs (h->protocol));
-drop:
- kfree_skb(skb);
-done:
- spin_unlock_irqrestore(&sp->lock, flags);
- sppp_flush_xmit();
- return;
-}
-
-/*
- * Handle transmit packets.
- */
-
-static int sppp_hard_header(struct sk_buff *skb,
- struct net_device *dev, __u16 type,
- const void *daddr, const void *saddr,
- unsigned int len)
-{
- struct sppp *sp = (struct sppp *)sppp_of(dev);
- struct ppp_header *h;
- skb_push(skb,sizeof(struct ppp_header));
- h=(struct ppp_header *)skb->data;
- if(sp->pp_flags&PP_CISCO)
- {
- h->address = CISCO_UNICAST;
- h->control = 0;
- }
- else
- {
- h->address = PPP_ALLSTATIONS;
- h->control = PPP_UI;
- }
- if(sp->pp_flags & PP_CISCO)
- {
- h->protocol = htons(type);
- }
- else switch(type)
- {
- case ETH_P_IP:
- h->protocol = htons(PPP_IP);
- break;
- case ETH_P_IPX:
- h->protocol = htons(PPP_IPX);
- break;
- }
- return sizeof(struct ppp_header);
-}
-
-static const struct header_ops sppp_header_ops = {
- .create = sppp_hard_header,
-};
-
-/*
- * Send keepalive packets, every 10 seconds.
- */
-
-static void sppp_keepalive (unsigned long dummy)
-{
- struct sppp *sp;
- unsigned long flags;
-
- spin_lock_irqsave(&spppq_lock, flags);
-
- for (sp=spppq; sp; sp=sp->pp_next)
- {
- struct net_device *dev = sp->pp_if;
-
- /* Keepalive mode disabled or channel down? */
- if (! (sp->pp_flags & PP_KEEPALIVE) ||
- ! (dev->flags & IFF_UP))
- continue;
-
- spin_lock(&sp->lock);
-
- /* No keepalive in PPP mode if LCP not opened yet. */
- if (! (sp->pp_flags & PP_CISCO) &&
- sp->lcp.state != LCP_STATE_OPENED) {
- spin_unlock(&sp->lock);
- continue;
- }
-
- if (sp->pp_alivecnt == MAXALIVECNT) {
- /* No keepalive packets got. Stop the interface. */
- printk (KERN_WARNING "%s: protocol down\n", dev->name);
- if_down (dev);
- if (! (sp->pp_flags & PP_CISCO)) {
- /* Shut down the PPP link. */
- sp->lcp.magic = jiffies;
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- sppp_clear_timeout (sp);
- /* Initiate negotiation. */
- sppp_lcp_open (sp);
- }
- }
- if (sp->pp_alivecnt <= MAXALIVECNT)
- ++sp->pp_alivecnt;
- if (sp->pp_flags & PP_CISCO)
- sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
- sp->pp_rseq);
- else if (sp->lcp.state == LCP_STATE_OPENED) {
- __be32 nmagic = htonl (sp->lcp.magic);
- sp->lcp.echoid = ++sp->pp_seq;
- sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
- sp->lcp.echoid, 4, &nmagic);
- }
-
- spin_unlock(&sp->lock);
- }
- spin_unlock_irqrestore(&spppq_lock, flags);
- sppp_flush_xmit();
- sppp_keepalive_timer.expires=jiffies+10*HZ;
- add_timer(&sppp_keepalive_timer);
-}
-
-/*
- * Handle incoming PPP Link Control Protocol packets.
- */
-
-static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
-{
- struct lcp_header *h;
- struct net_device *dev = sp->pp_if;
- int len = skb->len;
- u8 *p, opt[6];
- u32 rmagic = 0;
-
- if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
- dev->name, len);
- return;
- }
- h = (struct lcp_header *)skb->data;
- skb_pull(skb,sizeof(struct lcp_header *));
-
- if (sp->pp_flags & PP_DEBUG)
- {
- char state = '?';
- switch (sp->lcp.state) {
- case LCP_STATE_CLOSED: state = 'C'; break;
- case LCP_STATE_ACK_RCVD: state = 'R'; break;
- case LCP_STATE_ACK_SENT: state = 'S'; break;
- case LCP_STATE_OPENED: state = 'O'; break;
- }
- printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
- dev->name, state, len,
- sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
- if (len > 4)
- sppp_print_bytes ((u8*) (h+1), len-4);
- printk (">\n");
- }
- if (len > ntohs (h->len))
- len = ntohs (h->len);
- switch (h->type) {
- default:
- /* Unknown packet type -- send Code-Reject packet. */
- sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
- skb->len, h);
- break;
- case LCP_CONF_REQ:
- if (len < 4) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
- dev->name, len);
- break;
- }
- if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
- goto badreq;
- if (rmagic == sp->lcp.magic) {
- /* Local and remote magics equal -- loopback? */
- if (sp->pp_loopcnt >= MAXALIVECNT*5) {
- printk (KERN_WARNING "%s: loopback\n",
- dev->name);
- sp->pp_loopcnt = 0;
- if (dev->flags & IFF_UP) {
- if_down (dev);
- }
- } else if (sp->pp_flags & PP_DEBUG)
- printk (KERN_DEBUG "%s: conf req: magic glitch\n",
- dev->name);
- ++sp->pp_loopcnt;
-
- /* MUST send Conf-Nack packet. */
- rmagic = ~sp->lcp.magic;
- opt[0] = LCP_OPT_MAGIC;
- opt[1] = sizeof (opt);
- opt[2] = rmagic >> 24;
- opt[3] = rmagic >> 16;
- opt[4] = rmagic >> 8;
- opt[5] = rmagic;
- sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
- h->ident, sizeof (opt), &opt);
-badreq:
- switch (sp->lcp.state) {
- case LCP_STATE_OPENED:
- /* Initiate renegotiation. */
- sppp_lcp_open (sp);
- /* fall through... */
- case LCP_STATE_ACK_SENT:
- /* Go to closed state. */
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- }
- break;
- }
- /* Send Configure-Ack packet. */
- sp->pp_loopcnt = 0;
- if (sp->lcp.state != LCP_STATE_OPENED) {
- sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
- h->ident, len-4, h+1);
- }
- /* Change the state. */
- switch (sp->lcp.state) {
- case LCP_STATE_CLOSED:
- sp->lcp.state = LCP_STATE_ACK_SENT;
- break;
- case LCP_STATE_ACK_RCVD:
- sp->lcp.state = LCP_STATE_OPENED;
- sppp_ipcp_open (sp);
- break;
- case LCP_STATE_OPENED:
- /* Remote magic changed -- close session. */
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- /* Initiate renegotiation. */
- sppp_lcp_open (sp);
- /* Send ACK after our REQ in attempt to break loop */
- sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
- h->ident, len-4, h+1);
- sp->lcp.state = LCP_STATE_ACK_SENT;
- break;
- }
- break;
- case LCP_CONF_ACK:
- if (h->ident != sp->lcp.confid)
- break;
- sppp_clear_timeout (sp);
- if ((sp->pp_link_state != SPPP_LINK_UP) &&
- (dev->flags & IFF_UP)) {
- /* Coming out of loopback mode. */
- sp->pp_link_state=SPPP_LINK_UP;
- printk (KERN_INFO "%s: protocol up\n", dev->name);
- }
- switch (sp->lcp.state) {
- case LCP_STATE_CLOSED:
- sp->lcp.state = LCP_STATE_ACK_RCVD;
- sppp_set_timeout (sp, 5);
- break;
- case LCP_STATE_ACK_SENT:
- sp->lcp.state = LCP_STATE_OPENED;
- sppp_ipcp_open (sp);
- break;
- }
- break;
- case LCP_CONF_NAK:
- if (h->ident != sp->lcp.confid)
- break;
- p = (u8*) (h+1);
- if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
- rmagic = (u32)p[2] << 24 |
- (u32)p[3] << 16 | p[4] << 8 | p[5];
- if (rmagic == ~sp->lcp.magic) {
- int newmagic;
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
- dev->name);
- get_random_bytes(&newmagic, sizeof(newmagic));
- sp->lcp.magic += newmagic;
- } else
- sp->lcp.magic = rmagic;
- }
- if (sp->lcp.state != LCP_STATE_ACK_SENT) {
- /* Go to closed state. */
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- }
- /* The link will be renegotiated after timeout,
- * to avoid endless req-nack loop. */
- sppp_clear_timeout (sp);
- sppp_set_timeout (sp, 2);
- break;
- case LCP_CONF_REJ:
- if (h->ident != sp->lcp.confid)
- break;
- sppp_clear_timeout (sp);
- /* Initiate renegotiation. */
- sppp_lcp_open (sp);
- if (sp->lcp.state != LCP_STATE_ACK_SENT) {
- /* Go to closed state. */
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- }
- break;
- case LCP_TERM_REQ:
- sppp_clear_timeout (sp);
- /* Send Terminate-Ack packet. */
- sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
- /* Go to closed state. */
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- /* Initiate renegotiation. */
- sppp_lcp_open (sp);
- break;
- case LCP_TERM_ACK:
- case LCP_CODE_REJ:
- case LCP_PROTO_REJ:
- /* Ignore for now. */
- break;
- case LCP_DISC_REQ:
- /* Discard the packet. */
- break;
- case LCP_ECHO_REQ:
- if (sp->lcp.state != LCP_STATE_OPENED)
- break;
- if (len < 8) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
- dev->name, len);
- break;
- }
- if (ntohl (*(__be32*)(h+1)) == sp->lcp.magic) {
- /* Line loopback mode detected. */
- printk (KERN_WARNING "%s: loopback\n", dev->name);
- if_down (dev);
-
- /* Shut down the PPP link. */
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- sppp_clear_timeout (sp);
- /* Initiate negotiation. */
- sppp_lcp_open (sp);
- break;
- }
- *(__be32 *)(h+1) = htonl (sp->lcp.magic);
- sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
- break;
- case LCP_ECHO_REPLY:
- if (h->ident != sp->lcp.echoid)
- break;
- if (len < 8) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
- dev->name, len);
- break;
- }
- if (ntohl(*(__be32 *)(h+1)) != sp->lcp.magic)
- sp->pp_alivecnt = 0;
- break;
- }
-}
-
-/*
- * Handle incoming Cisco keepalive protocol packets.
- */
-
-static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
-{
- struct cisco_packet *h;
- struct net_device *dev = sp->pp_if;
-
- if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
- || (skb->len != CISCO_PACKET_LEN
- && skb->len != CISCO_BIG_PACKET_LEN)) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
- dev->name, skb->len);
- return;
- }
- h = (struct cisco_packet *)skb->data;
-	skb_pull(skb, sizeof(struct cisco_packet));
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
- dev->name, skb->len,
- ntohl (h->type), h->par1, h->par2, h->rel,
- h->time0, h->time1);
- switch (ntohl (h->type)) {
- default:
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
- dev->name, ntohl (h->type));
- break;
- case CISCO_ADDR_REPLY:
- /* Reply on address request, ignore */
- break;
- case CISCO_KEEPALIVE_REQ:
- sp->pp_alivecnt = 0;
- sp->pp_rseq = ntohl (h->par1);
- if (sp->pp_seq == sp->pp_rseq) {
- /* Local and remote sequence numbers are equal.
- * Probably, the line is in loopback mode. */
- int newseq;
- if (sp->pp_loopcnt >= MAXALIVECNT) {
- printk (KERN_WARNING "%s: loopback\n",
- dev->name);
- sp->pp_loopcnt = 0;
- if (dev->flags & IFF_UP) {
- if_down (dev);
- }
- }
- ++sp->pp_loopcnt;
-
- /* Generate new local sequence number */
- get_random_bytes(&newseq, sizeof(newseq));
- sp->pp_seq ^= newseq;
- break;
- }
- sp->pp_loopcnt = 0;
- if (sp->pp_link_state==SPPP_LINK_DOWN &&
- (dev->flags & IFF_UP)) {
- sp->pp_link_state=SPPP_LINK_UP;
- printk (KERN_INFO "%s: protocol up\n", dev->name);
- }
- break;
- case CISCO_ADDR_REQ:
- /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
- {
- __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */
-#ifdef CONFIG_INET
- struct in_device *in_dev;
- struct in_ifaddr *ifa;
-
- rcu_read_lock();
- if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
- {
- for (ifa=in_dev->ifa_list; ifa != NULL;
- ifa=ifa->ifa_next) {
- if (strcmp(dev->name, ifa->ifa_label) == 0)
- {
- addr = ifa->ifa_local;
- mask = ifa->ifa_mask;
- break;
- }
- }
- }
- rcu_read_unlock();
-#endif
- sppp_cisco_send (sp, CISCO_ADDR_REPLY, ntohl(addr), ntohl(mask));
- break;
- }
- }
-}
-
-
-/*
- * Send PPP LCP packet.
- */
-
-static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
- u8 ident, u16 len, void *data)
-{
- struct ppp_header *h;
- struct lcp_header *lh;
- struct sk_buff *skb;
- struct net_device *dev = sp->pp_if;
-
- skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
- GFP_ATOMIC);
- if (skb==NULL)
- return;
-
- skb_reserve(skb,dev->hard_header_len);
-
- h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
- h->address = PPP_ALLSTATIONS; /* broadcast address */
- h->control = PPP_UI; /* Unnumbered Info */
- h->protocol = htons (proto); /* Link Control Protocol */
-
- lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
- lh->type = type;
- lh->ident = ident;
- lh->len = htons (LCP_HEADER_LEN + len);
-
- if (len)
- memcpy(skb_put(skb,len),data, len);
-
- if (sp->pp_flags & PP_DEBUG) {
- printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
- dev->name,
- proto==PPP_LCP ? "lcp" : "ipcp",
- proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
- sppp_ipcp_type_name (lh->type), lh->ident,
- ntohs (lh->len));
- if (len)
- sppp_print_bytes ((u8*) (lh+1), len);
- printk (">\n");
- }
- /* Control is high priority so it doesn't get queued behind data */
- skb->priority=TC_PRIO_CONTROL;
- skb->dev = dev;
- skb_queue_tail(&tx_queue, skb);
-}
-
-/*
- * Send Cisco keepalive packet.
- */
-
-static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
-{
- struct ppp_header *h;
- struct cisco_packet *ch;
- struct sk_buff *skb;
- struct net_device *dev = sp->pp_if;
- u32 t = jiffies * 1000/HZ;
-
- skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
- GFP_ATOMIC);
-
- if(skb==NULL)
- return;
-
- skb_reserve(skb, dev->hard_header_len);
- h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
- h->address = CISCO_MULTICAST;
- h->control = 0;
- h->protocol = htons (CISCO_KEEPALIVE);
-
- ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
- ch->type = htonl (type);
- ch->par1 = htonl (par1);
- ch->par2 = htonl (par2);
- ch->rel = htons(0xffff);
- ch->time0 = htons ((u16) (t >> 16));
- ch->time1 = htons ((u16) t);
-
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
- dev->name, ntohl (ch->type), ch->par1,
- ch->par2, ch->rel, ch->time0, ch->time1);
- skb->priority=TC_PRIO_CONTROL;
- skb->dev = dev;
- skb_queue_tail(&tx_queue, skb);
-}
-
-/**
- * sppp_close - close down a synchronous PPP or Cisco HDLC link
- * @dev: The network device to drop the link of
- *
- * This drops the logical interface to the channel. It is not
- * done politely as we assume we will also be dropping DTR. Any
- * timeouts are killed.
- */
-
-int sppp_close (struct net_device *dev)
-{
- struct sppp *sp = (struct sppp *)sppp_of(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&sp->lock, flags);
- sp->pp_link_state = SPPP_LINK_DOWN;
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- sppp_clear_timeout (sp);
- spin_unlock_irqrestore(&sp->lock, flags);
-
- return 0;
-}
-
-EXPORT_SYMBOL(sppp_close);
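
A minimal sketch of how a card driver's stop routine is expected to wrap this call; the driver name and the hardware-shutdown helper are hypothetical, and the snippet assumes a driver that already includes <net/syncppp.h> and <linux/netdevice.h>:

	/* Hypothetical driver stop routine wrapping sppp_close(). */
	static int mydrv_stop(struct net_device *dev)
	{
		netif_stop_queue(dev);
		sppp_close(dev);		/* drop LCP/IPCP state, kill timeouts */
		mydrv_shutdown_hw(dev);		/* hypothetical: drop DTR, mask IRQs */
		return 0;
	}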
-
-/**
- * sppp_open - open a synchronous PPP or Cisco HDLC link
- * @dev: Network device to activate
- *
- * Close down any existing synchronous session and commence
- * from scratch. In the PPP case this means negotiating LCP/IPCP
- * and friends, while for Cisco HDLC we simply need to start sending
- * keepalives.
- */
-
-int sppp_open (struct net_device *dev)
-{
- struct sppp *sp = (struct sppp *)sppp_of(dev);
- unsigned long flags;
-
- sppp_close(dev);
-
- spin_lock_irqsave(&sp->lock, flags);
- if (!(sp->pp_flags & PP_CISCO)) {
- sppp_lcp_open (sp);
- }
- sp->pp_link_state = SPPP_LINK_DOWN;
- spin_unlock_irqrestore(&sp->lock, flags);
- sppp_flush_xmit();
-
- return 0;
-}
-
-EXPORT_SYMBOL(sppp_open);
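
The matching open path, again only a sketch with hypothetical helpers; the firm requirement is simply that the driver's open routine also calls sppp_open() (compare the #if 0 block in sppp_attach() below):

	/* Hypothetical driver open routine wrapping sppp_open(). */
	static int mydrv_open(struct net_device *dev)
	{
		int err = mydrv_start_hw(dev);	/* hypothetical: raise DTR, enable IRQs */
		if (err)
			return err;
		sppp_open(dev);			/* start LCP/IPCP, or keepalives in Cisco mode */
		netif_start_queue(dev);
		return 0;
	}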
-
-/**
- * sppp_reopen - notify of physical link loss
- * @dev: Device that lost the link
- *
- * This function informs the synchronous protocol code that
- * the underlying link died (for example a carrier drop on X.21)
- *
- * We increment the magic numbers to ensure that if the other end
- * failed to notice we will correctly start a new session. Due to the
- * nature of telco circuits it can happen that carrier is lost on one
- * end only.
- *
- * Having done this we go back to negotiating. This function may
- * be called from an interrupt context.
- */
-
-int sppp_reopen (struct net_device *dev)
-{
- struct sppp *sp = (struct sppp *)sppp_of(dev);
- unsigned long flags;
-
- sppp_close(dev);
-
- spin_lock_irqsave(&sp->lock, flags);
- if (!(sp->pp_flags & PP_CISCO))
- {
- sp->lcp.magic = jiffies;
- ++sp->pp_seq;
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- /* Give it a moment for the line to settle then go */
- sppp_set_timeout (sp, 1);
- }
- sp->pp_link_state=SPPP_LINK_DOWN;
- spin_unlock_irqrestore(&sp->lock, flags);
-
- return 0;
-}
-
-EXPORT_SYMBOL(sppp_reopen);
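
Because the comment notes this may run in interrupt context, one plausible caller is a carrier-change handler in the driver's IRQ path; the helper below is an assumption, not part of this file:

	/* Hypothetical carrier-change handling calling sppp_reopen(). */
	static void mydrv_carrier_event(struct net_device *dev, int carrier_up)
	{
		if (carrier_up) {
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			sppp_reopen(dev);	/* link died: bump magic, renegotiate */
		}
	}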
-
-/**
- * sppp_change_mtu - Change the link MTU
- * @dev: Device to change MTU on
- * @new_mtu: New MTU
- *
- * Change the MTU on the link. This can only be called with
- * the link down. It returns an error if the link is up or
- * the mtu is out of range.
- */
-
-static int sppp_change_mtu(struct net_device *dev, int new_mtu)
-{
- if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
- return -EINVAL;
- dev->mtu=new_mtu;
- return 0;
-}
-
-/**
- * sppp_do_ioctl - Ioctl handler for ppp/hdlc
- * @dev: Device subject to ioctl
- * @ifr: Interface request block from the user
- * @cmd: Command that is being issued
- *
- * This function handles the ioctls that may be issued by the user
- * to control the settings of a PPP/HDLC link. It does both busy
- * and security checks. This function is intended to be wrapped by
- * callers who wish to add additional ioctl calls of their own.
- */
-
-int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct sppp *sp = (struct sppp *)sppp_of(dev);
-
- if(dev->flags&IFF_UP)
- return -EBUSY;
-
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- switch(cmd)
- {
- case SPPPIOCCISCO:
- sp->pp_flags|=PP_CISCO;
- dev->type = ARPHRD_HDLC;
- break;
- case SPPPIOCPPP:
- sp->pp_flags&=~PP_CISCO;
- dev->type = ARPHRD_PPP;
- break;
- case SPPPIOCDEBUG:
- sp->pp_flags&=~PP_DEBUG;
- if(ifr->ifr_flags)
- sp->pp_flags|=PP_DEBUG;
- break;
- case SPPPIOCGFLAGS:
- if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
- return -EFAULT;
- break;
- case SPPPIOCSFLAGS:
- if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
- return -EFAULT;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-EXPORT_SYMBOL(sppp_do_ioctl);
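
The comment above says this handler is intended to be wrapped; a sketch of such a wrapper, with a made-up private command number and a hypothetical helper:

	/* Hypothetical ioctl wrapper adding one driver-private command. */
	#define MYDRV_GET_FOO	(SIOCDEVPRIVATE + 3)	/* assumed, not a real ioctl */

	static int mydrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	{
		if (cmd == MYDRV_GET_FOO)
			return mydrv_get_foo(dev, ifr);	/* hypothetical helper */
		return sppp_do_ioctl(dev, ifr, cmd);	/* mode, debug and flag ioctls */
	}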
-
-/**
- * sppp_attach - attach synchronous PPP/HDLC to a device
- * @pd: PPP device to initialise
- *
- * This initialises the PPP/HDLC support on an interface. At the
- * time of calling the dev element must point to the network device
- * that this interface is attached to. The interface should not yet
- * be registered.
- */
-
-void sppp_attach(struct ppp_device *pd)
-{
- struct net_device *dev = pd->dev;
- struct sppp *sp = &pd->sppp;
- unsigned long flags;
-
- /* Make sure embedding is safe for sppp_of */
- BUG_ON(sppp_of(dev) != sp);
-
- spin_lock_irqsave(&spppq_lock, flags);
- /* Initialize keepalive handler. */
- if (! spppq)
- {
- init_timer(&sppp_keepalive_timer);
- sppp_keepalive_timer.expires=jiffies+10*HZ;
- sppp_keepalive_timer.function=sppp_keepalive;
- add_timer(&sppp_keepalive_timer);
- }
- /* Insert new entry into the keepalive list. */
- sp->pp_next = spppq;
- spppq = sp;
- spin_unlock_irqrestore(&spppq_lock, flags);
-
- sp->pp_loopcnt = 0;
- sp->pp_alivecnt = 0;
- sp->pp_seq = 0;
- sp->pp_rseq = 0;
- sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
- sp->lcp.magic = 0;
- sp->lcp.state = LCP_STATE_CLOSED;
- sp->ipcp.state = IPCP_STATE_CLOSED;
- sp->pp_if = dev;
- spin_lock_init(&sp->lock);
-
- /*
- * Device specific setup. All but interrupt handler and
- * hard_start_xmit.
- */
-
- dev->header_ops = &sppp_header_ops;
-
- dev->tx_queue_len = 10;
- dev->type = ARPHRD_HDLC;
- dev->addr_len = 0;
- dev->hard_header_len = sizeof(struct ppp_header);
- dev->mtu = PPP_MTU;
- /*
-	 *	These 4 may be overridden by the driver but MUST still call the sppp_ functions
- */
- dev->do_ioctl = sppp_do_ioctl;
-#if 0
- dev->get_stats = NULL; /* Let the driver override these */
- dev->open = sppp_open;
- dev->stop = sppp_close;
-#endif
- dev->change_mtu = sppp_change_mtu;
- dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
-}
-
-EXPORT_SYMBOL(sppp_attach);
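
A bring-up sketch under the constraints stated above (dev allocated but not yet registered). The embedding and names are hypothetical, and how the driver points the device's private data at its ppp_device so that sppp_of() can find it is driver-specific, so it is only noted in a comment rather than guessed:

	/* Hypothetical probe path: attach synchronous PPP before registering. */
	struct mydrv_port {
		struct ppp_device pppdev;	/* provides .dev and .sppp */
		/* ... hardware state ... */
	};

	static int mydrv_probe_one(struct net_device *dev, struct mydrv_port *port)
	{
		port->pppdev.dev = dev;
		/* also wire dev's private data so sppp_of(dev) resolves to
		   &port->pppdev.sppp -- the exact field is not shown here */
		sppp_attach(&port->pppdev);
		dev->open = mydrv_open;		/* the wrappers sketched earlier */
		dev->stop = mydrv_stop;
		return register_netdev(dev);
	}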
-
-/**
- * sppp_detach - release PPP resources from a device
- * @dev: Network device to release
- *
- * Stop and free up any PPP/HDLC resources used by this
- * interface. This must be called before the device is
- * freed.
- */
-
-void sppp_detach (struct net_device *dev)
-{
- struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&spppq_lock, flags);
- /* Remove the entry from the keepalive list. */
- for (q = &spppq; (p = *q); q = &p->pp_next)
- if (p == sp) {
- *q = p->pp_next;
- break;
- }
-
- /* Stop keepalive handler. */
- if (! spppq)
- del_timer(&sppp_keepalive_timer);
- sppp_clear_timeout (sp);
- spin_unlock_irqrestore(&spppq_lock, flags);
-}
-
-EXPORT_SYMBOL(sppp_detach);
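
One plausible teardown order, mirroring the probe sketch; the only hard requirement stated above is that sppp_detach() runs before the device is freed:

	/* Hypothetical remove path. */
	static void mydrv_remove_one(struct mydrv_port *port)
	{
		struct net_device *dev = port->pppdev.dev;

		unregister_netdev(dev);
		sppp_detach(dev);	/* unhook from the keepalive list and timers */
		free_netdev(dev);
	}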
-
-/*
- * Analyze the LCP Configure-Request options list
- * for the presence of unknown options.
- * If the request contains unknown options, build and send a
- * Configure-Reject packet containing only the unknown options.
- */
-static int
-sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
- int len, u32 *magic)
-{
- u8 *buf, *r, *p;
- int rlen;
-
- len -= 4;
- buf = r = kmalloc (len, GFP_ATOMIC);
- if (! buf)
- return (0);
-
- p = (void*) (h+1);
- for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
- switch (*p) {
- case LCP_OPT_MAGIC:
- /* Magic number -- extract. */
- if (len >= 6 && p[1] == 6) {
- *magic = (u32)p[2] << 24 |
- (u32)p[3] << 16 | p[4] << 8 | p[5];
- continue;
- }
- break;
- case LCP_OPT_ASYNC_MAP:
- /* Async control character map -- check to be zero. */
- if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
- ! p[4] && ! p[5])
- continue;
- break;
- case LCP_OPT_MRU:
- /* Maximum receive unit -- always OK. */
- continue;
- default:
- /* Others not supported. */
- break;
- }
- /* Add the option to rejected list. */
- memcpy(r, p, p[1]);
- r += p[1];
- rlen += p[1];
- }
- if (rlen)
- sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
- kfree(buf);
- return (rlen == 0);
-}
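
A worked example of what the parser above does to one hypothetical Configure-Request payload (option codes per RFC 1661, byte values made up):

	static const u8 example_lcp_opts[] = {
		0x01, 0x04, 0x05, 0xdc,			/* MRU 1500: always accepted      */
		0x05, 0x06, 0x12, 0x34, 0x56, 0x78,	/* Magic-Number: *magic is filled */
		0x03, 0x04, 0xc0, 0x23,			/* Auth-Protocol (PAP): unknown
							   here, so these four bytes are
							   copied into the reject buffer */
	};

Since rlen ends up non-zero, a single LCP_CONF_REJ carrying only the Auth-Protocol option is sent back and the function returns 0, which makes the caller in sppp_lcp_input() take the badreq path.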
-
-static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
-{
- struct lcp_header *h;
- struct net_device *dev = sp->pp_if;
- int len = skb->len;
-
- if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
- dev->name, len);
- return;
- }
- h = (struct lcp_header *)skb->data;
- skb_pull(skb,sizeof(struct lcp_header));
- if (sp->pp_flags & PP_DEBUG) {
- printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
- dev->name, len,
- sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
- if (len > 4)
- sppp_print_bytes ((u8*) (h+1), len-4);
- printk (">\n");
- }
- if (len > ntohs (h->len))
- len = ntohs (h->len);
- switch (h->type) {
- default:
- /* Unknown packet type -- send Code-Reject packet. */
- sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
- break;
- case IPCP_CONF_REQ:
- if (len < 4) {
- if (sp->pp_flags & PP_DEBUG)
- printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
- dev->name, len);
- return;
- }
- if (len > 4) {
- sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
- len-4, h+1);
-
- switch (sp->ipcp.state) {
- case IPCP_STATE_OPENED:
- /* Initiate renegotiation. */
- sppp_ipcp_open (sp);
- /* fall through... */
- case IPCP_STATE_ACK_SENT:
- /* Go to closed state. */
- sp->ipcp.state = IPCP_STATE_CLOSED;
- }
- } else {
- /* Send Configure-Ack packet. */
- sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
- 0, NULL);
- /* Change the state. */
- if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
- sp->ipcp.state = IPCP_STATE_OPENED;
- else
- sp->ipcp.state = IPCP_STATE_ACK_SENT;
- }
- break;
- case IPCP_CONF_ACK:
- if (h->ident != sp->ipcp.confid)
- break;
- sppp_clear_timeout (sp);
- switch (sp->ipcp.state) {
- case IPCP_STATE_CLOSED:
- sp->ipcp.state = IPCP_STATE_ACK_RCVD;
- sppp_set_timeout (sp, 5);
- break;
- case IPCP_STATE_ACK_SENT:
- sp->ipcp.state = IPCP_STATE_OPENED;
- break;
- }
- break;
- case IPCP_CONF_NAK:
- case IPCP_CONF_REJ:
- if (h->ident != sp->ipcp.confid)
- break;
- sppp_clear_timeout (sp);
- /* Initiate renegotiation. */
- sppp_ipcp_open (sp);
- if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
- /* Go to closed state. */
- sp->ipcp.state = IPCP_STATE_CLOSED;
- break;
- case IPCP_TERM_REQ:
- /* Send Terminate-Ack packet. */
- sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
- /* Go to closed state. */
- sp->ipcp.state = IPCP_STATE_CLOSED;
- /* Initiate renegotiation. */
- sppp_ipcp_open (sp);
- break;
- case IPCP_TERM_ACK:
- /* Ignore for now. */
- case IPCP_CODE_REJ:
- /* Ignore for now. */
- break;
- }
-}
-
-static void sppp_lcp_open (struct sppp *sp)
-{
- char opt[6];
-
- if (! sp->lcp.magic)
- sp->lcp.magic = jiffies;
- opt[0] = LCP_OPT_MAGIC;
- opt[1] = sizeof (opt);
- opt[2] = sp->lcp.magic >> 24;
- opt[3] = sp->lcp.magic >> 16;
- opt[4] = sp->lcp.magic >> 8;
- opt[5] = sp->lcp.magic;
- sp->lcp.confid = ++sp->pp_seq;
- sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
- sizeof (opt), &opt);
- sppp_set_timeout (sp, 2);
-}
-
-static void sppp_ipcp_open (struct sppp *sp)
-{
- sp->ipcp.confid = ++sp->pp_seq;
- sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
- sppp_set_timeout (sp, 2);
-}
-
-/*
- * Process PPP control protocol timeouts.
- */
-
-static void sppp_cp_timeout (unsigned long arg)
-{
- struct sppp *sp = (struct sppp*) arg;
- unsigned long flags;
-
- spin_lock_irqsave(&sp->lock, flags);
-
- sp->pp_flags &= ~PP_TIMO;
- if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
- spin_unlock_irqrestore(&sp->lock, flags);
- return;
- }
- switch (sp->lcp.state) {
- case LCP_STATE_CLOSED:
- /* No ACK for Configure-Request, retry. */
- sppp_lcp_open (sp);
- break;
- case LCP_STATE_ACK_RCVD:
- /* ACK got, but no Configure-Request for peer, retry. */
- sppp_lcp_open (sp);
- sp->lcp.state = LCP_STATE_CLOSED;
- break;
- case LCP_STATE_ACK_SENT:
- /* ACK sent but no ACK for Configure-Request, retry. */
- sppp_lcp_open (sp);
- break;
- case LCP_STATE_OPENED:
- /* LCP is already OK, try IPCP. */
- switch (sp->ipcp.state) {
- case IPCP_STATE_CLOSED:
- /* No ACK for Configure-Request, retry. */
- sppp_ipcp_open (sp);
- break;
- case IPCP_STATE_ACK_RCVD:
- /* ACK got, but no Configure-Request for peer, retry. */
- sppp_ipcp_open (sp);
- sp->ipcp.state = IPCP_STATE_CLOSED;
- break;
- case IPCP_STATE_ACK_SENT:
- /* ACK sent but no ACK for Configure-Request, retry. */
- sppp_ipcp_open (sp);
- break;
- case IPCP_STATE_OPENED:
- /* IPCP is OK. */
- break;
- }
- break;
- }
- spin_unlock_irqrestore(&sp->lock, flags);
- sppp_flush_xmit();
-}
-
-static char *sppp_lcp_type_name (u8 type)
-{
- static char buf [8];
- switch (type) {
- case LCP_CONF_REQ: return ("conf-req");
- case LCP_CONF_ACK: return ("conf-ack");
- case LCP_CONF_NAK: return ("conf-nack");
- case LCP_CONF_REJ: return ("conf-rej");
- case LCP_TERM_REQ: return ("term-req");
- case LCP_TERM_ACK: return ("term-ack");
- case LCP_CODE_REJ: return ("code-rej");
- case LCP_PROTO_REJ: return ("proto-rej");
- case LCP_ECHO_REQ: return ("echo-req");
- case LCP_ECHO_REPLY: return ("echo-reply");
- case LCP_DISC_REQ: return ("discard-req");
- }
- sprintf (buf, "%xh", type);
- return (buf);
-}
-
-static char *sppp_ipcp_type_name (u8 type)
-{
- static char buf [8];
- switch (type) {
- case IPCP_CONF_REQ: return ("conf-req");
- case IPCP_CONF_ACK: return ("conf-ack");
- case IPCP_CONF_NAK: return ("conf-nack");
- case IPCP_CONF_REJ: return ("conf-rej");
- case IPCP_TERM_REQ: return ("term-req");
- case IPCP_TERM_ACK: return ("term-ack");
- case IPCP_CODE_REJ: return ("code-rej");
- }
- sprintf (buf, "%xh", type);
- return (buf);
-}
-
-static void sppp_print_bytes (u_char *p, u16 len)
-{
- printk (" %x", *p++);
- while (--len > 0)
- printk ("-%x", *p++);
-}
-
-/**
- * sppp_rcv - receive and process a WAN PPP frame
- * @skb: The buffer to process
- * @dev: The device it arrived on
- * @p: Unused
- * @orig_dev: Unused
- *
- * Protocol glue. This drives the deferred processing mode that the less
- * capable cards use. It can be called directly by cards that do not have
- * timing constraints but is normally called from the network layer
- * after interrupt servicing to process frames queued via netif_rx.
- */
-
-static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
-{
- if (dev_net(dev) != &init_net) {
- kfree_skb(skb);
- return 0;
- }
-
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
- return NET_RX_DROP;
- sppp_input(dev,skb);
- return 0;
-}
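
For completeness, frames normally reach this entry point through the ETH_P_WAN_PPP packet type registered below; a receive-completion sketch for a hypothetical card driver:

	/* Hypothetical RX completion: hand a raw frame to syncppp. */
	static void mydrv_rx_done(struct net_device *dev, struct sk_buff *skb)
	{
		skb->dev = dev;
		skb->protocol = htons(ETH_P_WAN_PPP);	/* matches sppp_packet_type */
		skb_reset_mac_header(skb);
		netif_rx(skb);				/* sppp_rcv() runs from softirq */
	}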
-
-static struct packet_type sppp_packet_type = {
- .type = __constant_htons(ETH_P_WAN_PPP),
- .func = sppp_rcv,
-};
-
-static char banner[] __initdata =
- KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
- KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
- "Jan \"Yenya\" Kasprzak.\n";
-
-static int __init sync_ppp_init(void)
-{
- if(debug)
- debug=PP_DEBUG;
- printk(banner);
- skb_queue_head_init(&tx_queue);
- dev_add_pack(&sppp_packet_type);
- return 0;
-}
-
-
-static void __exit sync_ppp_cleanup(void)
-{
- dev_remove_pack(&sppp_packet_type);
-}
-
-module_init(sync_ppp_init);
-module_exit(sync_ppp_cleanup);
-module_param(debug, int, 0);
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index a8a5ca0ee6c..4bffb67ebca 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -220,7 +220,6 @@ static inline void wanxl_rx_intr(card_t *card)
#endif
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- dev->last_rx = jiffies;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
skb = NULL;
@@ -411,12 +410,12 @@ static int wanxl_open(struct net_device *dev)
writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
timeout = jiffies + HZ;
- do
+ do {
if (get_status(port)->open) {
netif_start_queue(dev);
return 0;
}
- while (time_after(timeout, jiffies));
+ } while (time_after(timeout, jiffies));
printk(KERN_ERR "%s: unable to open port\n", dev->name);
/* ask the card to close the port, should it be still alive */
@@ -438,10 +437,10 @@ static int wanxl_close(struct net_device *dev)
port->card->plx + PLX_DOORBELL_TO_CARD);
timeout = jiffies + HZ;
- do
+ do {
if (!get_status(port)->open)
break;
- while (time_after(timeout, jiffies));
+ } while (time_after(timeout, jiffies));
if (get_status(port)->open)
printk(KERN_ERR "%s: unable to close port\n", dev->name);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 2a6c7a60756..e6e2ce3e7bc 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -64,7 +64,7 @@ static struct x25_asy *x25_asy_alloc(void)
if (dev == NULL)
break;
- sl = dev->priv;
+ sl = netdev_priv(dev);
/* Not in use ? */
if (!test_and_set_bit(SLF_INUSE, &sl->flags))
return sl;
@@ -86,7 +86,7 @@ static struct x25_asy *x25_asy_alloc(void)
return NULL;
/* Initialize channel control data */
- sl = dev->priv;
+ sl = netdev_priv(dev);
dev->base_addr = i;
/* register device so that it can be ifconfig'ed */
@@ -120,7 +120,7 @@ static void x25_asy_free(struct x25_asy *sl)
static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
unsigned char *xbuff, *rbuff;
int len = 2 * newmtu;
@@ -211,7 +211,6 @@ static void x25_asy_bump(struct x25_asy *sl)
printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
} else {
netif_rx(skb);
- sl->dev->last_rx = jiffies;
sl->stats.rx_packets++;
}
}
@@ -243,7 +242,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
* if we did not request it before write operation.
* 14 Oct 1994 Dmitry Gorodchanin.
*/
- sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
sl->xleft = count - actual;
sl->xhead = sl->xbuff + actual;
@@ -258,7 +257,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
static void x25_asy_write_wakeup(struct tty_struct *tty)
{
int actual;
- struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+ struct x25_asy *sl = tty->disc_data;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
@@ -268,7 +267,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->stats.tx_packets++;
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
x25_asy_unlock(sl);
return;
}
@@ -280,7 +279,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
static void x25_asy_timeout(struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
@@ -291,7 +290,7 @@ static void x25_asy_timeout(struct net_device *dev)
(tty_chars_in_buffer(sl->tty) || sl->xleft) ?
"bad line quality" : "driver error");
sl->xleft = 0;
- sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
x25_asy_unlock(sl);
}
spin_unlock(&sl->lock);
@@ -301,7 +300,7 @@ static void x25_asy_timeout(struct net_device *dev)
static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
int err;
if (!netif_running(sl->dev)) {
@@ -361,7 +360,6 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
{
- skb->dev->last_rx = jiffies;
return netif_rx(skb);
}
@@ -373,7 +371,7 @@ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
spin_lock(&sl->lock);
if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
@@ -398,7 +396,7 @@ static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
static void x25_asy_connected(struct net_device *dev, int reason)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
struct sk_buff *skb;
unsigned char *ptr;
@@ -413,12 +411,11 @@ static void x25_asy_connected(struct net_device *dev, int reason)
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
- sl->dev->last_rx = jiffies;
}
static void x25_asy_disconnected(struct net_device *dev, int reason)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
struct sk_buff *skb;
unsigned char *ptr;
@@ -433,7 +430,6 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
- sl->dev->last_rx = jiffies;
}
static struct lapb_register_struct x25_asy_callbacks = {
@@ -450,7 +446,7 @@ static struct lapb_register_struct x25_asy_callbacks = {
/* Open the low-level part of the X.25 channel. Easy! */
static int x25_asy_open(struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
unsigned long len;
int err;
@@ -499,12 +495,12 @@ norbuff:
/* Close the low-level part of the X.25 channel. Easy! */
static int x25_asy_close(struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
int err;
spin_lock(&sl->lock);
if (sl->tty)
- sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
netif_stop_queue(dev);
sl->rcount = 0;
@@ -527,7 +523,7 @@ static int x25_asy_close(struct net_device *dev)
static void x25_asy_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
- struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+ struct x25_asy *sl = tty->disc_data;
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
@@ -555,7 +551,7 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
static int x25_asy_open_tty(struct tty_struct *tty)
{
- struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+ struct x25_asy *sl = tty->disc_data;
int err;
if (tty->ops->write == NULL)
@@ -596,7 +592,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
*/
static void x25_asy_close_tty(struct tty_struct *tty)
{
- struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+ struct x25_asy *sl = tty->disc_data;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -615,7 +611,7 @@ static void x25_asy_close_tty(struct tty_struct *tty)
static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
return &sl->stats;
}
@@ -624,7 +620,7 @@ static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
* STANDARD X.25 ENCAPSULATION *
************************************************************************/
-int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
+static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
{
unsigned char *ptr = d;
unsigned char c;
@@ -696,7 +692,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+ struct x25_asy *sl = tty->disc_data;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -717,7 +713,7 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
static int x25_asy_open_dev(struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
if (sl->tty == NULL)
return -ENODEV;
return 0;
@@ -726,7 +722,7 @@ static int x25_asy_open_dev(struct net_device *dev)
/* Initialise the X.25 driver. Called by the device init code */
static void x25_asy_setup(struct net_device *dev)
{
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
sl->magic = X25_ASY_MAGIC;
sl->dev = dev;
@@ -793,7 +789,7 @@ static void __exit exit_x25_asy(void)
for (i = 0; i < x25_asy_maxdev; i++) {
dev = x25_asy_devs[i];
if (dev) {
- struct x25_asy *sl = dev->priv;
+ struct x25_asy *sl = netdev_priv(dev);
spin_lock_bh(&sl->lock);
if (sl->tty)
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 5bf7e01ef0e..3d00971fe5e 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -601,24 +601,18 @@ static void z8530_dma_status(struct z8530_channel *chan)
write_zsctrl(chan, RES_H_IUS);
}
-struct z8530_irqhandler z8530_dma_sync=
-{
+static struct z8530_irqhandler z8530_dma_sync = {
z8530_dma_rx,
z8530_dma_tx,
z8530_dma_status
};
-EXPORT_SYMBOL(z8530_dma_sync);
-
-struct z8530_irqhandler z8530_txdma_sync=
-{
+static struct z8530_irqhandler z8530_txdma_sync = {
z8530_rx,
z8530_dma_tx,
z8530_dma_status
};
-EXPORT_SYMBOL(z8530_txdma_sync);
-
/**
* z8530_rx_clear - Handle RX events from a stopped chip
* @c: Z8530 channel to shut up
@@ -710,7 +704,7 @@ EXPORT_SYMBOL(z8530_nop);
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
struct z8530_dev *dev=dev_id;
- u8 intr;
+ u8 uninitialized_var(intr);
static volatile int locker=0;
int work=0;
struct z8530_irqhandler *irqs;