Diffstat (limited to 'drivers/net/irda/sir_dev.c')
-rw-r--r--  drivers/net/irda/sir_dev.c  136
 1 file changed, 67 insertions(+), 69 deletions(-)
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 6078e03de9a..43e9ab4f4d7 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -11,8 +11,10 @@
*
********************************************************************/
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -80,7 +82,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
return 0;
default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ IRDA_ERROR("%s - undefined state\n", __func__);
return -EINVAL;
}
fsm->substate = next_state;
@@ -107,11 +109,11 @@ static void sirdev_config_fsm(struct work_struct *work)
int ret = -1;
unsigned delay;
- IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+ IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
do {
IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
- __FUNCTION__, fsm->state, fsm->substate);
+ __func__, fsm->state, fsm->substate);
next_state = fsm->state;
delay = 0;
@@ -220,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
break;
case SIRDEV_STATE_DONGLE_SPEED:
- if (dev->dongle_drv->reset) {
+ if (dev->dongle_drv->set_speed) {
ret = dev->dongle_drv->set_speed(dev, fsm->param);
if (ret < 0) {
fsm->result = ret;
@@ -249,12 +251,12 @@ static void sirdev_config_fsm(struct work_struct *work)
break;
default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ IRDA_ERROR("%s - undefined state\n", __func__);
fsm->result = -EINVAL;
/* fall thru */
case SIRDEV_STATE_ERROR:
- IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
+ IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
netif_stop_queue(dev->netdev);
@@ -284,11 +286,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
{
struct sir_fsm *fsm = &dev->fsm;
- IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
+ IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
+ initial_state, param);
if (down_trylock(&fsm->sem)) {
if (in_interrupt() || in_atomic() || irqs_disabled()) {
- IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
return -EWOULDBLOCK;
} else
down(&fsm->sem);
@@ -296,7 +299,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
if (fsm->state == SIRDEV_STATE_DEAD) {
/* race with sirdev_close should never happen */
- IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), instance staled!\n", __func__);
up(&fsm->sem);
return -ESTALE; /* or better EPIPE? */
}
@@ -334,14 +337,14 @@ static int sirdev_is_receiving(struct sir_dev *dev)
if (!atomic_read(&dev->enable_rx))
return 0;
- return (dev->rx_buff.state != OUTSIDE_FRAME);
+ return dev->rx_buff.state != OUTSIDE_FRAME;
}
int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
int err;
- IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);
+ IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
err = sirdev_schedule_dongle_open(dev, type);
if (unlikely(err))
@@ -376,7 +379,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (ret > 0) {
- IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
dev->tx_buff.data += ret;
dev->tx_buff.len -= ret;
@@ -437,7 +440,7 @@ void sirdev_write_complete(struct sir_dev *dev)
spin_lock_irqsave(&dev->tx_lock, flags);
IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
- __FUNCTION__, dev->tx_buff.len);
+ __func__, dev->tx_buff.len);
if (likely(dev->tx_buff.len > 0)) {
/* Write data left in transmit buffer */
@@ -450,12 +453,12 @@ void sirdev_write_complete(struct sir_dev *dev)
else if (unlikely(actual<0)) {
/* could be dropped later when we have tx_timeout to recover */
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __FUNCTION__, actual);
+ __func__, actual);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
dev_kfree_skb_any(skb);
- dev->stats.tx_errors++;
- dev->stats.tx_dropped++;
+ dev->netdev->stats.tx_errors++;
+ dev->netdev->stats.tx_dropped++;
}
dev->tx_buff.len = 0;
}
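
The counters move from the driver-private copy (dev->stats) to the statistics block embedded in struct net_device. That is also why the sirdev_get_stats() callback can be deleted further down: when a driver registers no stats hook of its own, the core's dev_get_stats() falls back to reading netdev->stats directly. A minimal sketch of the pattern (foo_priv is illustrative, not this driver's structure):

	/* before: a private duplicate plus a ->get_stats hook to expose it */
	struct foo_priv {
		struct net_device_stats stats;
	};

	/* after: count straight into the device's own structure */
	ndev->stats.tx_errors++;
	ndev->stats.tx_dropped++;
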
@@ -471,7 +474,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* restarted when the irda-thread has completed the request.
*/
- IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
dev->raw_tx = 0;
goto done; /* no post-frame handling in raw mode */
}
@@ -488,24 +491,24 @@ void sirdev_write_complete(struct sir_dev *dev)
* re-activated.
*/
- IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);
+ IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += skb->len;
dev_kfree_skb_any(skb);
}
if (unlikely(dev->new_speed > 0)) {
- IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
+ IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
err = sirdev_schedule_speed(dev, dev->new_speed);
if (unlikely(err)) {
/* should never happen
* forget the speed change and hope the stack recovers
*/
IRDA_ERROR("%s - schedule speed change failed: %d\n",
- __FUNCTION__, err);
+ __func__, err);
netif_wake_queue(dev->netdev);
}
/* else: success
@@ -532,13 +535,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
if (!dev || !dev->netdev) {
- IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), not ready yet!\n", __func__);
return -1;
}
if (!dev->irlap) {
IRDA_WARNING("%s - too early: %p / %zd!\n",
- __FUNCTION__, cp, count);
+ __func__, cp, count);
return -1;
}
@@ -547,8 +550,8 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
* just update stats and set media busy
*/
irda_device_set_media_busy(dev->netdev, TRUE);
- dev->stats.rx_dropped++;
- IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
+ dev->netdev->stats.rx_dropped++;
+ IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
return 0;
}
@@ -556,7 +559,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
if (likely(atomic_read(&dev->enable_rx))) {
while (count--)
/* Unwrap and destuff one byte */
- async_unwrap_char(dev->netdev, &dev->stats,
+ async_unwrap_char(dev->netdev, &dev->netdev->stats,
&dev->rx_buff, *cp++);
} else {
while (count--) {
@@ -581,26 +584,20 @@ EXPORT_SYMBOL(sirdev_receive);
/* callbacks from network layer */
-static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
-{
- struct sir_dev *dev = ndev->priv;
-
- return (dev) ? &dev->stats : NULL;
-}
-
-static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
- struct sir_dev *dev = ndev->priv;
+ struct sir_dev *dev = netdev_priv(ndev);
unsigned long flags;
int actual = 0;
int err;
s32 speed;
- IRDA_ASSERT(dev != NULL, return 0;);
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);
+ IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
speed = irda_get_next_speed(skb);
if ((speed != dev->speed) && (speed != -1)) {
@@ -613,7 +610,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
* stopped so the network layer will retry after the
* fsm completes and wakes the queue.
*/
- return 1;
+ return NETDEV_TX_BUSY;
}
else if (unlikely(err)) {
/* other fatal error - forget the speed change and
@@ -627,7 +624,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
} else
dev->new_speed = speed;
}
@@ -637,7 +634,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Check problems */
if(spin_is_locked(&dev->tx_lock)) {
- IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
}
/* serialize with write completion */
@@ -653,28 +650,27 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
atomic_set(&dev->enable_rx, 0);
if (unlikely(sirdev_is_receiving(dev)))
- dev->stats.collisions++;
+ dev->netdev->stats.collisions++;
actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (likely(actual > 0)) {
dev->tx_skb = skb;
- ndev->trans_start = jiffies;
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
else if (unlikely(actual < 0)) {
/* could be dropped later when we have tx_timeout to recover */
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __FUNCTION__, actual);
+ __func__, actual);
dev_kfree_skb_any(skb);
- dev->stats.tx_errors++;
- dev->stats.tx_dropped++;
+ dev->netdev->stats.tx_errors++;
+ dev->netdev->stats.tx_dropped++;
netif_wake_queue(ndev);
}
spin_unlock_irqrestore(&dev->tx_lock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
/* called from network layer with rtnl hold */
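
The transmit handler now returns netdev_tx_t rather than a bare int. Roughly, NETDEV_TX_OK means the skb was consumed (transmitted, queued to hardware, or dropped after counting the error), while NETDEV_TX_BUSY means it was not consumed and the core will retry it, which only makes sense while the queue is stopped. The explicit ndev->trans_start update can also go, since the core stamps the queue's trans_start around ndo_start_xmit. A hypothetical handler showing the shape (foo_xmit and hw_busy are illustrative only):

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		bool hw_busy = false;	/* illustrative: would query the hardware */

		if (hw_busy) {
			/* skb NOT consumed; keep the queue stopped so the
			 * core's retry does not spin */
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;
		}

		/* ... hand the frame to the hardware ... */
		return NETDEV_TX_OK;	/* skb consumed, one way or another */
	}
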
@@ -682,12 +678,12 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct if_irda_req *irq = (struct if_irda_req *) rq;
- struct sir_dev *dev = ndev->priv;
+ struct sir_dev *dev = netdev_priv(ndev);
int ret = 0;
IRDA_ASSERT(dev != NULL, return -1;);
- IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);
+ IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -759,7 +755,8 @@ static int sirdev_alloc_buffers(struct sir_dev *dev)
dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
/* Bootstrap ZeroCopy Rx */
- dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
+ dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
+ GFP_KERNEL);
if (dev->rx_buff.skb == NULL)
return -ENOMEM;
skb_reserve(dev->rx_buff.skb, 1);
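
__netdev_alloc_skb() allocates like __dev_alloc_skb() but additionally associates the buffer with the receiving device by setting skb->dev, so the bootstrap rx skb is tied to this netdev from the start. Approximately (for illustration only, foo_alloc_rx_skb is not a real helper):

	static struct sk_buff *foo_alloc_rx_skb(struct net_device *ndev,
						unsigned int size, gfp_t gfp)
	{
		struct sk_buff *skb = __dev_alloc_skb(size, gfp);

		if (skb)
			skb->dev = ndev;	/* the extra step __netdev_alloc_skb takes */
		return skb;
	}
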
@@ -785,8 +782,7 @@ static int sirdev_alloc_buffers(struct sir_dev *dev)
static void sirdev_free_buffers(struct sir_dev *dev)
{
- if (dev->rx_buff.skb)
- kfree_skb(dev->rx_buff.skb);
+ kfree_skb(dev->rx_buff.skb);
kfree(dev->tx_buff.head);
dev->rx_buff.head = dev->tx_buff.head = NULL;
dev->rx_buff.skb = NULL;
@@ -794,7 +790,7 @@ static void sirdev_free_buffers(struct sir_dev *dev)
static int sirdev_open(struct net_device *ndev)
{
- struct sir_dev *dev = ndev->priv;
+ struct sir_dev *dev = netdev_priv(ndev);
const struct sir_driver *drv = dev->drv;
if (!drv)
@@ -804,7 +800,7 @@ static int sirdev_open(struct net_device *ndev)
if (!try_module_get(drv->owner))
return -ESTALE;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
if (sirdev_alloc_buffers(dev))
goto errout_dec;
@@ -822,7 +818,7 @@ static int sirdev_open(struct net_device *ndev)
netif_wake_queue(ndev);
- IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
+ IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
return 0;
@@ -839,10 +835,10 @@ errout_dec:
static int sirdev_close(struct net_device *ndev)
{
- struct sir_dev *dev = ndev->priv;
+ struct sir_dev *dev = netdev_priv(ndev);
const struct sir_driver *drv;
-// IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+// IRDA_DEBUG(0, "%s\n", __func__);
netif_stop_queue(ndev);
@@ -871,6 +867,12 @@ out:
return 0;
}
+static const struct net_device_ops sirdev_ops = {
+ .ndo_start_xmit = sirdev_hard_xmit,
+ .ndo_open = sirdev_open,
+ .ndo_stop = sirdev_close,
+ .ndo_do_ioctl = sirdev_ioctl,
+};
/* ----------------------------------------------------------------------------- */
struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
@@ -878,7 +880,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
struct net_device *ndev;
struct sir_dev *dev;
- IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);
+ IRDA_DEBUG(0, "%s - %s\n", __func__, name);
/* instead of adding tests to protect against drv->do_write==NULL
* at several places we refuse to create a sir_dev instance for
@@ -892,10 +894,10 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
*/
ndev = alloc_irdadev(sizeof(*dev));
if (ndev == NULL) {
- IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
+ IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
goto out;
}
- dev = ndev->priv;
+ dev = netdev_priv(ndev);
irda_init_max_qos_capabilies(&dev->qos);
dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -908,20 +910,16 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
dev->tx_skb = NULL;
spin_lock_init(&dev->tx_lock);
- init_MUTEX(&dev->fsm.sem);
+ sema_init(&dev->fsm.sem, 1);
dev->drv = drv;
dev->netdev = ndev;
/* Override the network functions we need to use */
- ndev->hard_start_xmit = sirdev_hard_xmit;
- ndev->open = sirdev_open;
- ndev->stop = sirdev_close;
- ndev->get_stats = sirdev_get_stats;
- ndev->do_ioctl = sirdev_ioctl;
+ ndev->netdev_ops = &sirdev_ops;
if (register_netdev(ndev)) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
goto out_freenetdev;
}
@@ -938,7 +936,7 @@ int sirdev_put_instance(struct sir_dev *dev)
{
int err = 0;
- IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s\n", __func__);
atomic_set(&dev->enable_rx, 0);
@@ -948,7 +946,7 @@ int sirdev_put_instance(struct sir_dev *dev)
if (dev->dongle_drv)
err = sirdev_schedule_dongle_close(dev);
if (err)
- IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);
+ IRDA_ERROR("%s - error %d\n", __func__, err);
sirdev_close(dev->netdev);