Diffstat (limited to 'drivers/net/caif')
-rw-r--r--   drivers/net/caif/Kconfig             |   23
-rw-r--r--   drivers/net/caif/Makefile            |    7
-rw-r--r--   drivers/net/caif/caif_hsi.c          |  555
-rw-r--r--   drivers/net/caif/caif_serial.c       |   77
-rw-r--r--   drivers/net/caif/caif_shm_u5500.c    |  128
-rw-r--r--   drivers/net/caif/caif_shmcore.c      |  753
-rw-r--r--   drivers/net/caif/caif_spi.c          |    7
-rw-r--r--   drivers/net/caif/caif_spi_slave.c    |    4
-rw-r--r--   drivers/net/caif/caif_virtio.c       |  791
9 files changed, 1153 insertions, 1192 deletions
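Both the old and the reworked caif_hsi.c below keep the PAD_POW2() helper for computing head and tail padding against the configured alignment (4 bytes by default in the new hsi_default_config). A minimal standalone sketch of that arithmetic, using hypothetical frame lengths, is:

```c
/* Standalone illustration of the PAD_POW2() padding helper used by
 * caif_hsi.c. The lengths below are hypothetical; the 4-byte head/tail
 * alignment matches hsi_default_config in the patch.
 */
#include <stdio.h>

#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
			  ((pow) - ((x) & ((pow) - 1))))

int main(void)
{
	unsigned int head_align = 4, tail_align = 4;
	unsigned int hdr_len = 2, frame_len = 45;	/* hypothetical */

	/* Mirrors cfhsi_tx_frm(): one leading byte (which records the head
	 * padding) plus padding so the CAIF payload starts on a head_align
	 * boundary, then tail padding so the next frame starts on a
	 * tail_align boundary.
	 */
	unsigned int hpad = 1 + PAD_POW2(hdr_len + 1, head_align);
	unsigned int tpad = PAD_POW2(frame_len + hpad, tail_align);

	printf("hpad=%u tpad=%u total=%u\n",
	       hpad, tpad, frame_len + hpad + tpad);	/* 2, 1, 48 */
	return 0;
}
```

Because the alignment must be a power of two, the macro reduces to a mask-and-subtract; this is why both the old module parameters and the new netlink attributes warn that the head/tail alignment "must be a base of 2".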
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index abf4d7a9dcc..54709808677 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -6,7 +6,7 @@ comment "CAIF transport drivers" config CAIF_TTY tristate "CAIF TTY transport driver" - depends on CAIF + depends on CAIF && TTY default n ---help--- The CAIF TTY transport driver is a Line Discipline (ldisc) @@ -32,13 +32,6 @@ config CAIF_SPI_SYNC help to synchronize to the next transfer in case of over or under-runs. This option also needs to be enabled on the modem. -config CAIF_SHM - tristate "CAIF shared memory protocol driver" - depends on CAIF && U5500_MBOX - default n - ---help--- - The CAIF shared memory protocol driver for the STE UX5500 platform. - config CAIF_HSI tristate "CAIF HSI transport driver" depends on CAIF @@ -47,3 +40,17 @@ config CAIF_HSI The caif low level driver for CAIF over HSI. Be aware that if you enable this then you also need to enable a low-level HSI driver. + +config CAIF_VIRTIO + tristate "CAIF virtio transport driver" + depends on CAIF && HAS_DMA + select VHOST_RING + select VIRTIO + select GENERIC_ALLOCATOR + default n + ---help--- + The caif driver for CAIF over Virtio. + +if CAIF_VIRTIO +source "drivers/vhost/Kconfig" +endif diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index 91dff861560..9bbd45391f6 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -7,9 +7,8 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o cfspi_slave-objs := caif_spi.o caif_spi_slave.o obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o -# Shared memory -caif_shm-objs := caif_shmcore.o caif_shm_u5500.o -obj-$(CONFIG_CAIF_SHM) += caif_shm.o - # HSI interface obj-$(CONFIG_CAIF_HSI) += caif_hsi.o + +# Virtio interface +obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 4a27adb7ae6..5e40a8b68cb 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1,8 +1,7 @@ /* * Copyright (C) ST-Ericsson AB 2010 - * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com - * Author: Daniel Martensson / daniel.martensson@stericsson.com - * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com + * Author: Daniel Martensson + * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no * License terms: GNU General Public License (GPL) version 2. */ @@ -11,7 +10,6 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> -#include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/list.h> @@ -20,72 +18,59 @@ #include <linux/sched.h> #include <linux/if_arp.h> #include <linux/timer.h> -#include <linux/rtnetlink.h> +#include <net/rtnetlink.h> #include <linux/pkt_sched.h> #include <net/caif/caif_layer.h> #include <net/caif/caif_hsi.h> MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); +MODULE_AUTHOR("Daniel Martensson"); MODULE_DESCRIPTION("CAIF HSI driver"); /* Returns the number of padding bytes for alignment. */ #define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 
0 :\ (((pow)-((x)&((pow)-1))))) -static int inactivity_timeout = 1000; -module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); +static const struct cfhsi_config hsi_default_config = { -static int aggregation_timeout = 1; -module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms."); + /* Inactivity timeout on HSI, ms */ + .inactivity_timeout = HZ, -/* - * HSI padding options. - * Warning: must be a base of 2 (& operation used) and can not be zero ! - */ -static int hsi_head_align = 4; -module_param(hsi_head_align, int, S_IRUGO); -MODULE_PARM_DESC(hsi_head_align, "HSI head alignment."); + /* Aggregation timeout (ms) of zero means no aggregation is done*/ + .aggregation_timeout = 1, -static int hsi_tail_align = 4; -module_param(hsi_tail_align, int, S_IRUGO); -MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment."); - -/* - * HSI link layer flowcontrol thresholds. - * Warning: A high threshold value migth increase throughput but it will at - * the same time prevent channel prioritization and increase the risk of - * flooding the modem. The high threshold should be above the low. - */ -static int hsi_high_threshold = 100; -module_param(hsi_high_threshold, int, S_IRUGO); -MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF)."); + /* + * HSI link layer flow-control thresholds. + * Threshold values for the HSI packet queue. Flow-control will be + * asserted when the number of packets exceeds q_high_mark. It will + * not be de-asserted before the number of packets drops below + * q_low_mark. + * Warning: A high threshold value might increase throughput but it + * will at the same time prevent channel prioritization and increase + * the risk of flooding the modem. The high threshold should be above + * the low. + */ + .q_high_mark = 100, + .q_low_mark = 50, -static int hsi_low_threshold = 50; -module_param(hsi_low_threshold, int, S_IRUGO); -MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON)."); + /* + * HSI padding options. + * Warning: must be a base of 2 (& operation used) and can not be zero ! + */ + .head_align = 4, + .tail_align = 4, +}; #define ON 1 #define OFF 0 -/* - * Threshold values for the HSI packet queue. Flowcontrol will be asserted - * when the number of packets exceeds HIGH_WATER_MARK. It will not be - * de-asserted before the number of packets drops below LOW_WATER_MARK. - */ -#define LOW_WATER_MARK hsi_low_threshold -#define HIGH_WATER_MARK hsi_high_threshold - static LIST_HEAD(cfhsi_list); -static spinlock_t cfhsi_list_lock; static void cfhsi_inactivity_tout(unsigned long arg) { struct cfhsi *cfhsi = (struct cfhsi *)arg; - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); /* Schedule power down work queue. 
*/ @@ -101,8 +86,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, int hpad, tpad, len; info = (struct caif_payload_info *)&skb->cb; - hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); - tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); + hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); + tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); len = skb->len + hpad + tpad; if (direction > 0) @@ -115,7 +100,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) { int i; - if (cfhsi->aggregation_timeout < 0) + if (cfhsi->cfg.aggregation_timeout == 0) return true; for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { @@ -171,7 +156,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi) cfhsi->tx_state = CFHSI_TX_STATE_IDLE; if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->inactivity_timeout); + jiffies + cfhsi->cfg.inactivity_timeout); spin_unlock_bh(&cfhsi->lock); } @@ -181,14 +166,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) size_t fifo_occupancy; int ret; - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); do { - ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, + ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, &fifo_occupancy); if (ret) { - dev_warn(&cfhsi->ndev->dev, + netdev_warn(cfhsi->ndev, "%s: can't get FIFO occupancy: %d.\n", __func__, ret); break; @@ -198,11 +183,11 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) fifo_occupancy = min(sizeof(buffer), fifo_occupancy); set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy, - cfhsi->dev); + ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy, + cfhsi->ops); if (ret) { clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - dev_warn(&cfhsi->ndev->dev, + netdev_warn(cfhsi->ndev, "%s: can't read data: %d.\n", __func__, ret); break; @@ -213,13 +198,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); if (ret < 0) { - dev_warn(&cfhsi->ndev->dev, + netdev_warn(cfhsi->ndev, "%s: can't wait for flush complete: %d.\n", __func__, ret); break; } else if (!ret) { ret = -ETIMEDOUT; - dev_warn(&cfhsi->ndev->dev, + netdev_warn(cfhsi->ndev, "%s: timeout waiting for flush complete.\n", __func__); break; @@ -246,14 +231,14 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Check if we can embed a CAIF frame. */ if (skb->len < CFHSI_MAX_EMB_FRM_SZ) { struct caif_payload_info *info; - int hpad = 0; - int tpad = 0; + int hpad; + int tpad; /* Calculate needed head alignment and tail alignment. */ info = (struct caif_payload_info *)&skb->cb; - hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); - tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); + hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); + tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); /* Check if frame still fits with added alignment. */ if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) { @@ -282,8 +267,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; while (nfrms < CFHSI_MAX_PKTS) { struct caif_payload_info *info; - int hpad = 0; - int tpad = 0; + int hpad; + int tpad; if (!skb) skb = cfhsi_dequeue(cfhsi); @@ -294,8 +279,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Calculate needed head alignment and tail alignment. 
*/ info = (struct caif_payload_info *)&skb->cb; - hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); - tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); + hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); + tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); /* Fill in CAIF frame length in descriptor. */ desc->cffrm_len[nfrms] = hpad + skb->len + tpad; @@ -348,7 +333,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi) struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; int len, res; - dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) return; @@ -366,22 +351,22 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi) cfhsi->tx_state = CFHSI_TX_STATE_IDLE; /* Start inactivity timer. */ mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->inactivity_timeout); + jiffies + cfhsi->cfg.inactivity_timeout); spin_unlock_bh(&cfhsi->lock); break; } /* Set up new transfer. */ - res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); + res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); if (WARN_ON(res < 0)) - dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", + netdev_err(cfhsi->ndev, "%s: TX error %d.\n", __func__, res); } while (res < 0); } static void cfhsi_tx_done(struct cfhsi *cfhsi) { - dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) return; @@ -392,7 +377,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi) */ spin_lock_bh(&cfhsi->lock); if (cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark && + cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark && cfhsi->cfdev.flowctrl) { cfhsi->flow_off_sent = 0; @@ -404,19 +389,19 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi) cfhsi_start_tx(cfhsi); } else { mod_timer(&cfhsi->aggregation_timer, - jiffies + cfhsi->aggregation_timeout); + jiffies + cfhsi->cfg.aggregation_timeout); spin_unlock_bh(&cfhsi->lock); } return; } -static void cfhsi_tx_done_cb(struct cfhsi_drv *drv) +static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops) { struct cfhsi *cfhsi; - cfhsi = container_of(drv, struct cfhsi, drv); - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) @@ -433,7 +418,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) if ((desc->header & ~CFHSI_PIGGY_DESC) || (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", + netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", __func__); return -EPROTO; } @@ -455,7 +440,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Sanity check length of CAIF frame. */ if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n", + netdev_err(cfhsi->ndev, "%s: Invalid length.\n", __func__); return -EPROTO; } @@ -463,7 +448,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Allocate SKB (OK even in IRQ context). */ skb = alloc_skb(len + 1, GFP_ATOMIC); if (!skb) { - dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", + netdev_err(cfhsi->ndev, "%s: Out of memory !\n", __func__); return -ENOMEM; } @@ -477,8 +462,8 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) skb->dev = cfhsi->ndev; /* - * We are called from a arch specific platform device. 
- * Unfortunately we don't know what context we're + * We are in a callback handler and + * unfortunately we don't know what context we're * running in. */ if (in_interrupt()) @@ -504,7 +489,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) xfer_sz += CFHSI_DESC_SZ; if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) { - dev_err(&cfhsi->ndev->dev, + netdev_err(cfhsi->ndev, "%s: Invalid payload len: %d, ignored.\n", __func__, xfer_sz); return -EPROTO; @@ -551,7 +536,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Sanity check header and offset. */ if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) || (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { - dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", + netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", __func__); return -EPROTO; } @@ -573,7 +558,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) struct sk_buff *skb; u8 *dst = NULL; u8 *pcffrm = NULL; - int len = 0; + int len; /* CAIF frame starts after head padding. */ pcffrm = pfrm + *pfrm + 1; @@ -585,7 +570,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Sanity check length of CAIF frames. */ if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n", + netdev_err(cfhsi->ndev, "%s: Invalid length.\n", __func__); return -EPROTO; } @@ -593,7 +578,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Allocate SKB (OK even in IRQ context). */ skb = alloc_skb(len + 1, GFP_ATOMIC); if (!skb) { - dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", + netdev_err(cfhsi->ndev, "%s: Out of memory !\n", __func__); cfhsi->rx_state.nfrms = nfrms; return -ENOMEM; @@ -608,7 +593,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) skb->dev = cfhsi->ndev; /* - * We're called from a platform device, + * We're called in callback from HSI * and don't know the context we're running in. */ if (in_interrupt()) @@ -639,7 +624,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) desc = (struct cfhsi_desc *)cfhsi->rx_buf; - dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__); + netdev_dbg(cfhsi->ndev, "%s\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) return; @@ -647,7 +632,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) /* Update inactivity timer if pending. */ spin_lock_bh(&cfhsi->lock); mod_timer_pending(&cfhsi->inactivity_timer, - jiffies + cfhsi->inactivity_timeout); + jiffies + cfhsi->cfg.inactivity_timeout); spin_unlock_bh(&cfhsi->lock); if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { @@ -680,12 +665,11 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) if (desc_pld_len < 0) goto out_of_sync; - if (desc_pld_len > 0) + if (desc_pld_len > 0) { rx_len = desc_pld_len; - - if (desc_pld_len > 0 && - (piggy_desc->header & CFHSI_PIGGY_DESC)) - rx_len += CFHSI_DESC_SZ; + if (piggy_desc->header & CFHSI_PIGGY_DESC) + rx_len += CFHSI_DESC_SZ; + } /* * Copy needed information from the piggy-backed @@ -693,8 +677,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) */ memcpy(rx_buf, (u8 *)piggy_desc, CFHSI_DESC_SHORT_SZ); - if (desc_pld_len == -EPROTO) - goto out_of_sync; } } @@ -710,13 +692,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) /* Initiate next read */ if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { /* Set up new transfer. 
*/ - dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", + netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__); - res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len, - cfhsi->dev); + res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len, + cfhsi->ops); if (WARN_ON(res < 0)) { - dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n", + netdev_err(cfhsi->ndev, "%s: RX error %d.\n", __func__, res); cfhsi->ndev->stats.rx_errors++; cfhsi->ndev->stats.rx_dropped++; @@ -753,7 +735,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) return; out_of_sync: - dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__); + netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__); print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, cfhsi->rx_buf, CFHSI_DESC_SZ); schedule_work(&cfhsi->out_of_sync_work); @@ -763,18 +745,18 @@ static void cfhsi_rx_slowpath(unsigned long arg) { struct cfhsi *cfhsi = (struct cfhsi *)arg; - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); cfhsi_rx_done(cfhsi); } -static void cfhsi_rx_done_cb(struct cfhsi_drv *drv) +static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops) { struct cfhsi *cfhsi; - cfhsi = container_of(drv, struct cfhsi, drv); - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) @@ -807,9 +789,9 @@ static void cfhsi_wake_up(struct work_struct *work) } /* Activate wake line. */ - cfhsi->dev->cfhsi_wake_up(cfhsi->dev); + cfhsi->ops->cfhsi_wake_up(cfhsi->ops); - dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n", + netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n", __func__); /* Wait for acknowledge. */ @@ -819,33 +801,33 @@ static void cfhsi_wake_up(struct work_struct *work) &cfhsi->bits), ret); if (unlikely(ret < 0)) { /* Interrupted by signal. */ - dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", + netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", __func__, ret); clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->dev->cfhsi_wake_down(cfhsi->dev); + cfhsi->ops->cfhsi_wake_down(cfhsi->ops); return; } else if (!ret) { bool ca_wake = false; size_t fifo_occupancy = 0; /* Wakeup timeout */ - dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n", + netdev_dbg(cfhsi->ndev, "%s: Timeout.\n", __func__); /* Check FIFO to check if modem has sent something. */ - WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, + WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, &fifo_occupancy)); - dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", + netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n", __func__, (unsigned) fifo_occupancy); /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev, + WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, &ca_wake)); if (ca_wake) { - dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n", + netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", __func__); /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */ @@ -856,11 +838,11 @@ static void cfhsi_wake_up(struct work_struct *work) } clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->dev->cfhsi_wake_down(cfhsi->dev); + cfhsi->ops->cfhsi_wake_down(cfhsi->ops); return; } wake_ack: - dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n", + netdev_dbg(cfhsi->ndev, "%s: Woken.\n", __func__); /* Clear power up bit. */ @@ -868,11 +850,11 @@ wake_ack: clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); /* Resume read operation. 
*/ - dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__); - res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev); + netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__); + res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops); if (WARN_ON(res < 0)) - dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res); + netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res); /* Clear power up acknowledment. */ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); @@ -881,16 +863,16 @@ wake_ack: /* Resume transmit if queues are not empty. */ if (!cfhsi_tx_queue_len(cfhsi)) { - dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", + netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n", __func__); /* Start inactivity timer. */ mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->inactivity_timeout); + jiffies + cfhsi->cfg.inactivity_timeout); spin_unlock_bh(&cfhsi->lock); return; } - dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n", + netdev_dbg(cfhsi->ndev, "%s: Host wake.\n", __func__); spin_unlock_bh(&cfhsi->lock); @@ -900,14 +882,14 @@ wake_ack: if (likely(len > 0)) { /* Set up new transfer. */ - res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); + res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); if (WARN_ON(res < 0)) { - dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", + netdev_err(cfhsi->ndev, "%s: TX error %d.\n", __func__, res); cfhsi_abort_tx(cfhsi); } } else { - dev_err(&cfhsi->ndev->dev, + netdev_err(cfhsi->ndev, "%s: Failed to create HSI frame: %d.\n", __func__, len); } @@ -921,13 +903,13 @@ static void cfhsi_wake_down(struct work_struct *work) int retry = CFHSI_WAKE_TOUT; cfhsi = container_of(work, struct cfhsi, wake_down_work); - dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) return; /* Deactivate wake line. */ - cfhsi->dev->cfhsi_wake_down(cfhsi->dev); + cfhsi->ops->cfhsi_wake_down(cfhsi->ops); /* Wait for acknowledge. */ ret = CFHSI_WAKE_TOUT; @@ -936,26 +918,26 @@ static void cfhsi_wake_down(struct work_struct *work) &cfhsi->bits), ret); if (ret < 0) { /* Interrupted by signal. */ - dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", + netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", __func__, ret); return; } else if (!ret) { bool ca_wake = true; /* Timeout */ - dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__); + netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__); /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev, + WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, &ca_wake)); if (!ca_wake) - dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n", + netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", __func__); } /* Check FIFO occupancy. */ while (retry) { - WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, + WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, &fifo_occupancy)); if (!fifo_occupancy) @@ -967,14 +949,13 @@ static void cfhsi_wake_down(struct work_struct *work) } if (!retry) - dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__); + netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__); /* Clear AWAKE condition. */ clear_bit(CFHSI_AWAKE, &cfhsi->bits); /* Cancel pending RX requests. 
*/ - cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); - + cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); } static void cfhsi_out_of_sync(struct work_struct *work) @@ -988,12 +969,12 @@ static void cfhsi_out_of_sync(struct work_struct *work) rtnl_unlock(); } -static void cfhsi_wake_up_cb(struct cfhsi_drv *drv) +static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops) { struct cfhsi *cfhsi = NULL; - cfhsi = container_of(drv, struct cfhsi, drv); - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); @@ -1007,12 +988,12 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv) queue_work(cfhsi->wq, &cfhsi->wake_up_work); } -static void cfhsi_wake_down_cb(struct cfhsi_drv *drv) +static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops) { struct cfhsi *cfhsi = NULL; - cfhsi = container_of(drv, struct cfhsi, drv); - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); /* Initiating low power is only permitted by the host (us). */ @@ -1024,7 +1005,7 @@ static void cfhsi_aggregation_tout(unsigned long arg) { struct cfhsi *cfhsi = (struct cfhsi *)arg; - dev_dbg(&cfhsi->ndev->dev, "%s.\n", + netdev_dbg(cfhsi->ndev, "%s.\n", __func__); cfhsi_start_tx(cfhsi); @@ -1077,7 +1058,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) /* Send flow off if number of packets is above high water mark. */ if (!cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark && + cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark && cfhsi->cfdev.flowctrl) { cfhsi->flow_off_sent = 1; cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); @@ -1114,9 +1095,9 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) WARN_ON(!len); /* Set up new transfer. */ - res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); + res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); if (WARN_ON(res < 0)) { - dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", + netdev_err(cfhsi->ndev, "%s: TX error %d.\n", __func__, res); cfhsi_abort_tx(cfhsi); } @@ -1129,19 +1110,19 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static const struct net_device_ops cfhsi_ops; +static const struct net_device_ops cfhsi_netdevops; static void cfhsi_setup(struct net_device *dev) { int i; struct cfhsi *cfhsi = netdev_priv(dev); dev->features = 0; - dev->netdev_ops = &cfhsi_ops; dev->type = ARPHRD_CAIF; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; dev->tx_queue_len = 0; dev->destructor = free_netdev; + dev->netdev_ops = &cfhsi_netdevops; for (i = 0; i < CFHSI_PRIO_LAST; ++i) skb_queue_head_init(&cfhsi->qhead[i]); cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; @@ -1149,43 +1130,7 @@ static void cfhsi_setup(struct net_device *dev) cfhsi->cfdev.use_stx = false; cfhsi->cfdev.use_fcs = false; cfhsi->ndev = dev; -} - -int cfhsi_probe(struct platform_device *pdev) -{ - struct cfhsi *cfhsi = NULL; - struct net_device *ndev; - - int res; - - ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup); - if (!ndev) - return -ENODEV; - - cfhsi = netdev_priv(ndev); - cfhsi->ndev = ndev; - cfhsi->pdev = pdev; - - /* Assign the HSI device. */ - cfhsi->dev = pdev->dev.platform_data; - - /* Assign the driver to this HSI device. */ - cfhsi->dev->drv = &cfhsi->drv; - - /* Register network device. 
*/ - res = register_netdev(ndev); - if (res) { - dev_err(&ndev->dev, "%s: Registration error: %d.\n", - __func__, res); - free_netdev(ndev); - return -ENODEV; - } - /* Add CAIF HSI device to list. */ - spin_lock(&cfhsi_list_lock); - list_add_tail(&cfhsi->list, &cfhsi_list); - spin_unlock(&cfhsi_list_lock); - - return res; + cfhsi->cfg = hsi_default_config; } static int cfhsi_open(struct net_device *ndev) @@ -1201,9 +1146,6 @@ static int cfhsi_open(struct net_device *ndev) /* Set flow info */ cfhsi->flow_off_sent = 0; - cfhsi->q_low_mark = LOW_WATER_MARK; - cfhsi->q_high_mark = HIGH_WATER_MARK; - /* * Allocate a TX buffer with the size of a HSI packet descriptors @@ -1231,20 +1173,8 @@ static int cfhsi_open(struct net_device *ndev) goto err_alloc_rx_flip; } - /* Pre-calculate inactivity timeout. */ - if (inactivity_timeout != -1) { - cfhsi->inactivity_timeout = - inactivity_timeout * HZ / 1000; - if (!cfhsi->inactivity_timeout) - cfhsi->inactivity_timeout = 1; - else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA) - cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; - } else { - cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; - } - /* Initialize aggregation timeout */ - cfhsi->aggregation_timeout = aggregation_timeout; + cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout; /* Initialize recieve vaiables. */ cfhsi->rx_ptr = cfhsi->rx_buf; @@ -1254,10 +1184,10 @@ static int cfhsi_open(struct net_device *ndev) spin_lock_init(&cfhsi->lock); /* Set up the driver. */ - cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb; - cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb; - cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb; - cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb; + cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb; + cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb; + cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb; + cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb; /* Initialize the work queues. */ INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); @@ -1271,9 +1201,9 @@ static int cfhsi_open(struct net_device *ndev) clear_bit(CFHSI_AWAKE, &cfhsi->bits); /* Create work thread. */ - cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name); + cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name); if (!cfhsi->wq) { - dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n", + netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n", __func__); res = -ENODEV; goto err_create_wq; @@ -1298,9 +1228,9 @@ static int cfhsi_open(struct net_device *ndev) cfhsi->aggregation_timer.function = cfhsi_aggregation_tout; /* Activate HSI interface. 
*/ - res = cfhsi->dev->cfhsi_up(cfhsi->dev); + res = cfhsi->ops->cfhsi_up(cfhsi->ops); if (res) { - dev_err(&cfhsi->ndev->dev, + netdev_err(cfhsi->ndev, "%s: can't activate HSI interface: %d.\n", __func__, res); goto err_activate; @@ -1309,14 +1239,14 @@ static int cfhsi_open(struct net_device *ndev) /* Flush FIFO */ res = cfhsi_flush_fifo(cfhsi); if (res) { - dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n", + netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n", __func__, res); goto err_net_reg; } return res; err_net_reg: - cfhsi->dev->cfhsi_down(cfhsi->dev); + cfhsi->ops->cfhsi_down(cfhsi->ops); err_activate: destroy_workqueue(cfhsi->wq); err_create_wq: @@ -1346,7 +1276,7 @@ static int cfhsi_close(struct net_device *ndev) del_timer_sync(&cfhsi->aggregation_timer); /* Cancel pending RX request (if any) */ - cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); + cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); /* Destroy workqueue */ destroy_workqueue(cfhsi->wq); @@ -1359,7 +1289,7 @@ static int cfhsi_close(struct net_device *ndev) cfhsi_abort_tx(cfhsi); /* Deactivate interface */ - cfhsi->dev->cfhsi_down(cfhsi->dev); + cfhsi->ops->cfhsi_down(cfhsi->ops); /* Free buffers. */ kfree(tx_buf); @@ -1368,85 +1298,184 @@ static int cfhsi_close(struct net_device *ndev) return 0; } -static const struct net_device_ops cfhsi_ops = { +static void cfhsi_uninit(struct net_device *dev) +{ + struct cfhsi *cfhsi = netdev_priv(dev); + ASSERT_RTNL(); + symbol_put(cfhsi_get_device); + list_del(&cfhsi->list); +} + +static const struct net_device_ops cfhsi_netdevops = { + .ndo_uninit = cfhsi_uninit, .ndo_open = cfhsi_open, .ndo_stop = cfhsi_close, .ndo_start_xmit = cfhsi_xmit }; -int cfhsi_remove(struct platform_device *pdev) +static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi) { - struct list_head *list_node; - struct list_head *n; - struct cfhsi *cfhsi = NULL; - struct cfhsi_dev *dev; + int i; - dev = (struct cfhsi_dev *)pdev->dev.platform_data; - spin_lock(&cfhsi_list_lock); - list_for_each_safe(list_node, n, &cfhsi_list) { - cfhsi = list_entry(list_node, struct cfhsi, list); - /* Find the corresponding device. */ - if (cfhsi->dev == dev) { - /* Remove from list. */ - list_del(list_node); - spin_unlock(&cfhsi_list_lock); - return 0; - } + if (!data) { + pr_debug("no params data found\n"); + return; } - spin_unlock(&cfhsi_list_lock); - return -ENODEV; + + i = __IFLA_CAIF_HSI_INACTIVITY_TOUT; + /* + * Inactivity timeout in millisecs. Lowest possible value is 1, + * and highest possible is NEXT_TIMER_MAX_DELTA. + */ + if (data[i]) { + u32 inactivity_timeout = nla_get_u32(data[i]); + /* Pre-calculate inactivity timeout. 
*/ + cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000; + if (cfhsi->cfg.inactivity_timeout == 0) + cfhsi->cfg.inactivity_timeout = 1; + else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA) + cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA; + } + + i = __IFLA_CAIF_HSI_AGGREGATION_TOUT; + if (data[i]) + cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]); + + i = __IFLA_CAIF_HSI_HEAD_ALIGN; + if (data[i]) + cfhsi->cfg.head_align = nla_get_u32(data[i]); + + i = __IFLA_CAIF_HSI_TAIL_ALIGN; + if (data[i]) + cfhsi->cfg.tail_align = nla_get_u32(data[i]); + + i = __IFLA_CAIF_HSI_QHIGH_WATERMARK; + if (data[i]) + cfhsi->cfg.q_high_mark = nla_get_u32(data[i]); + + i = __IFLA_CAIF_HSI_QLOW_WATERMARK; + if (data[i]) + cfhsi->cfg.q_low_mark = nla_get_u32(data[i]); +} + +static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[]) +{ + cfhsi_netlink_parms(data, netdev_priv(dev)); + netdev_state_change(dev); + return 0; } -struct platform_driver cfhsi_plat_drv = { - .probe = cfhsi_probe, - .remove = cfhsi_remove, - .driver = { - .name = "cfhsi", - .owner = THIS_MODULE, - }, +static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = { + [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 }, + [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 }, + [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 }, + [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 }, + [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 }, + [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 }, }; -static void __exit cfhsi_exit_module(void) +static size_t caif_hsi_get_size(const struct net_device *dev) +{ + int i; + size_t s = 0; + for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++) + s += nla_total_size(caif_hsi_policy[i].len); + return s; +} + +static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct cfhsi *cfhsi = netdev_priv(dev); + + if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT, + cfhsi->cfg.inactivity_timeout) || + nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT, + cfhsi->cfg.aggregation_timeout) || + nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, + cfhsi->cfg.head_align) || + nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, + cfhsi->cfg.tail_align) || + nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK, + cfhsi->cfg.q_high_mark) || + nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK, + cfhsi->cfg.q_low_mark)) + return -EMSGSIZE; + + return 0; +} + +static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) { - struct list_head *list_node; - struct list_head *n; struct cfhsi *cfhsi = NULL; + struct cfhsi_ops *(*get_ops)(void); - spin_lock(&cfhsi_list_lock); - list_for_each_safe(list_node, n, &cfhsi_list) { - cfhsi = list_entry(list_node, struct cfhsi, list); + ASSERT_RTNL(); - /* Remove from list. */ - list_del(list_node); - spin_unlock(&cfhsi_list_lock); + cfhsi = netdev_priv(dev); + cfhsi_netlink_parms(data, cfhsi); + dev_net_set(cfhsi->ndev, src_net); + + get_ops = symbol_get(cfhsi_get_ops); + if (!get_ops) { + pr_err("%s: failed to get the cfhsi_ops\n", __func__); + return -ENODEV; + } - unregister_netdevice(cfhsi->ndev); + /* Assign the HSI device. */ + cfhsi->ops = (*get_ops)(); + if (!cfhsi->ops) { + pr_err("%s: failed to get the cfhsi_ops\n", __func__); + goto err; + } - spin_lock(&cfhsi_list_lock); + /* Assign the driver to this HSI device. 
*/ + cfhsi->ops->cb_ops = &cfhsi->cb_ops; + if (register_netdevice(dev)) { + pr_warn("%s: caif_hsi device registration failed\n", __func__); + goto err; } - spin_unlock(&cfhsi_list_lock); + /* Add CAIF HSI device to list. */ + list_add_tail(&cfhsi->list, &cfhsi_list); - /* Unregister platform driver. */ - platform_driver_unregister(&cfhsi_plat_drv); + return 0; +err: + symbol_put(cfhsi_get_ops); + return -ENODEV; } -static int __init cfhsi_init_module(void) +static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = { + .kind = "cfhsi", + .priv_size = sizeof(struct cfhsi), + .setup = cfhsi_setup, + .maxtype = __IFLA_CAIF_HSI_MAX, + .policy = caif_hsi_policy, + .newlink = caif_hsi_newlink, + .changelink = caif_hsi_changelink, + .get_size = caif_hsi_get_size, + .fill_info = caif_hsi_fill_info, +}; + +static void __exit cfhsi_exit_module(void) { - int result; + struct list_head *list_node; + struct list_head *n; + struct cfhsi *cfhsi; - /* Initialize spin lock. */ - spin_lock_init(&cfhsi_list_lock); + rtnl_link_unregister(&caif_hsi_link_ops); - /* Register platform driver. */ - result = platform_driver_register(&cfhsi_plat_drv); - if (result) { - printk(KERN_ERR "Could not register platform HSI driver: %d.\n", - result); - goto err_dev_register; + rtnl_lock(); + list_for_each_safe(list_node, n, &cfhsi_list) { + cfhsi = list_entry(list_node, struct cfhsi, list); + unregister_netdev(cfhsi->ndev); } + rtnl_unlock(); +} - err_dev_register: - return result; +static int __init cfhsi_init_module(void) +{ + return rtnl_link_register(&caif_hsi_link_ops); } module_init(cfhsi_init_module); diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 8a3054b8481..fc73865bb83 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -1,6 +1,6 @@ /* * Copyright (C) ST-Ericsson AB 2010 - * Author: Sjur Brendeland / sjur.brandeland@stericsson.com + * Author: Sjur Brendeland * License terms: GNU General Public License (GPL) version 2 */ @@ -21,7 +21,7 @@ #include <linux/debugfs.h> MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>"); +MODULE_AUTHOR("Sjur Brendeland"); MODULE_DESCRIPTION("CAIF serial device TTY line discipline"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_CAIF); @@ -35,8 +35,9 @@ MODULE_ALIAS_LDISC(N_CAIF); #define OFF 0 #define CAIF_MAX_MTU 4096 -/*This list is protected by the rtnl lock. */ +static DEFINE_SPINLOCK(ser_lock); static LIST_HEAD(ser_list); +static LIST_HEAD(ser_release_list); static bool ser_loop; module_param(ser_loop, bool, S_IRUGO); @@ -88,11 +89,9 @@ static inline void update_tty_status(struct ser_device *ser) { ser->tty_status = ser->tty->stopped << 5 | - ser->tty->hw_stopped << 4 | ser->tty->flow_stopped << 3 | ser->tty->packet << 2 | - ser->tty->low_latency << 1 | - ser->tty->warned; + ser->tty->port->low_latency << 1; } static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty) { @@ -205,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data, skb->protocol = htons(ETH_P_CAIF); skb_reset_mac_header(skb); - skb->dev = ser->dev; debugfs_rx(ser, data, count); /* Push received packet up the stack. 
*/ ret = netif_rx_ni(skb); @@ -310,6 +308,28 @@ static void ldisc_tx_wakeup(struct tty_struct *tty) } +static void ser_release(struct work_struct *work) +{ + struct list_head list; + struct ser_device *ser, *tmp; + + spin_lock(&ser_lock); + list_replace_init(&ser_release_list, &list); + spin_unlock(&ser_lock); + + if (!list_empty(&list)) { + rtnl_lock(); + list_for_each_entry_safe(ser, tmp, &list, node) { + dev_close(ser->dev); + unregister_netdevice(ser->dev); + debugfs_deinit(ser); + } + rtnl_unlock(); + } +} + +static DECLARE_WORK(ser_release_work, ser_release); + static int ldisc_open(struct tty_struct *tty) { struct ser_device *ser; @@ -323,8 +343,16 @@ static int ldisc_open(struct tty_struct *tty) if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG)) return -EPERM; - sprintf(name, "cf%s", tty->name); + /* release devices to avoid name collision */ + ser_release(NULL); + + result = snprintf(name, sizeof(name), "cf%s", tty->name); + if (result >= IFNAMSIZ) + return -EINVAL; dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); + if (!dev) + return -ENOMEM; + ser = netdev_priv(dev); ser->tty = tty_kref_get(tty); ser->dev = dev; @@ -340,7 +368,9 @@ static int ldisc_open(struct tty_struct *tty) return -ENODEV; } + spin_lock(&ser_lock); list_add(&ser->node, &ser_list); + spin_unlock(&ser_lock); rtnl_unlock(); netif_stop_queue(dev); update_tty_status(ser); @@ -350,19 +380,13 @@ static int ldisc_open(struct tty_struct *tty) static void ldisc_close(struct tty_struct *tty) { struct ser_device *ser = tty->disc_data; - /* Remove may be called inside or outside of rtnl_lock */ - int islocked = rtnl_is_locked(); - if (!islocked) - rtnl_lock(); - /* device is freed automagically by net-sysfs */ - dev_close(ser->dev); - unregister_netdevice(ser->dev); - list_del(&ser->node); - debugfs_deinit(ser); tty_kref_put(ser->tty); - if (!islocked) - rtnl_unlock(); + + spin_lock(&ser_lock); + list_move(&ser->node, &ser_release_list); + spin_unlock(&ser_lock); + schedule_work(&ser_release_work); } /* The line discipline structure. 
*/ @@ -437,16 +461,11 @@ static int __init caif_ser_init(void) static void __exit caif_ser_exit(void) { - struct ser_device *ser = NULL; - struct list_head *node; - struct list_head *_tmp; - - list_for_each_safe(node, _tmp, &ser_list) { - ser = list_entry(node, struct ser_device, node); - dev_close(ser->dev); - unregister_netdevice(ser->dev); - list_del(node); - } + spin_lock(&ser_lock); + list_splice(&ser_list, &ser_release_list); + spin_unlock(&ser_lock); + ser_release(NULL); + cancel_work_sync(&ser_release_work); tty_unregister_ldisc(N_CAIF); debugfs_remove_recursive(debugfsdir); } diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c deleted file mode 100644 index 89d76b7b325..00000000000 --- a/drivers/net/caif/caif_shm_u5500.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com - * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com - * License terms: GNU General Public License (GPL) version 2 - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <mach/mbox-db5500.h> -#include <net/caif/caif_shm.h> - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("CAIF Shared Memory protocol driver"); - -#define MAX_SHM_INSTANCES 1 - -enum { - MBX_ACC0, - MBX_ACC1, - MBX_DSP -}; - -static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES]; - -static unsigned int shm_start; -static unsigned int shm_size; - -module_param(shm_size, uint , 0440); -MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory"); - -module_param(shm_start, uint , 0440); -MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory"); - -static int shmdev_send_msg(u32 dev_id, u32 mbx_msg) -{ - /* Always block until msg is written successfully */ - mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true); - return 0; -} - -static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev, - void *pshm_drv) -{ - /* - * For UX5500, we have only 1 SHM instance which uses MBX0 - * for communication with the peer modem - */ - pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv); - - if (!pshm_dev->hmbx) - return -ENODEV; - else - return 0; -} - -static int __init caif_shmdev_init(void) -{ - int i, result; - - /* Loop is currently overkill, there is only one instance */ - for (i = 0; i < MAX_SHM_INSTANCES; i++) { - - shmdev_lyr[i].shm_base_addr = shm_start; - shmdev_lyr[i].shm_total_sz = shm_size; - - if (((char *)shmdev_lyr[i].shm_base_addr == NULL) - || (shmdev_lyr[i].shm_total_sz <= 0)) { - pr_warn("ERROR," - "Shared memory Address and/or Size incorrect" - ", Bailing out ...\n"); - result = -EINVAL; - goto clean; - } - - pr_info("SHM AREA (instance %d) STARTS" - " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr); - - shmdev_lyr[i].shm_id = i; - shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg; - shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup; - - /* - * Finally, CAIF core module is called with details in place: - * 1. SHM base address - * 2. SHM size - * 3. MBX handle - */ - result = caif_shmcore_probe(&shmdev_lyr[i]); - if (result) { - pr_warn("ERROR[%d]," - "Could not probe SHM core (instance %d)" - " Bailing out ...\n", result, i); - goto clean; - } - } - - return 0; - -clean: - /* - * For now, we assume that even if one instance of SHM fails, we bail - * out of the driver support completely. For this, we need to release - * any memory allocated and unregister any instance of SHM net device. 
- */ - for (i = 0; i < MAX_SHM_INSTANCES; i++) { - if (shmdev_lyr[i].pshm_netdev) - unregister_netdev(shmdev_lyr[i].pshm_netdev); - } - return result; -} - -static void __exit caif_shmdev_exit(void) -{ - int i; - - for (i = 0; i < MAX_SHM_INSTANCES; i++) { - caif_shmcore_remove(shmdev_lyr[i].pshm_netdev); - kfree((void *)shmdev_lyr[i].shm_base_addr); - } - -} - -module_init(caif_shmdev_init); -module_exit(caif_shmdev_exit); diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c deleted file mode 100644 index bc497d71885..00000000000 --- a/drivers/net/caif/caif_shmcore.c +++ /dev/null @@ -1,753 +0,0 @@ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com - * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com, - * Daniel Martensson / daniel.martensson@stericsson.com - * License terms: GNU General Public License (GPL) version 2 - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt - -#include <linux/spinlock.h> -#include <linux/sched.h> -#include <linux/list.h> -#include <linux/netdevice.h> -#include <linux/if_arp.h> -#include <linux/io.h> - -#include <net/caif/caif_device.h> -#include <net/caif/caif_shm.h> - -#define NR_TX_BUF 6 -#define NR_RX_BUF 6 -#define TX_BUF_SZ 0x2000 -#define RX_BUF_SZ 0x2000 - -#define CAIF_NEEDED_HEADROOM 32 - -#define CAIF_FLOW_ON 1 -#define CAIF_FLOW_OFF 0 - -#define LOW_WATERMARK 3 -#define HIGH_WATERMARK 4 - -/* Maximum number of CAIF buffers per shared memory buffer. */ -#define SHM_MAX_FRMS_PER_BUF 10 - -/* - * Size in bytes of the descriptor area - * (With end of descriptor signalling) - */ -#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \ - sizeof(struct shm_pck_desc)) - -/* - * Offset to the first CAIF frame within a shared memory buffer. - * Aligned on 32 bytes. - */ -#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32)) - -/* Number of bytes for CAIF shared memory header. */ -#define SHM_HDR_LEN 1 - -/* Number of padding bytes for the complete CAIF frame. */ -#define SHM_FRM_PAD_LEN 4 - -#define CAIF_MAX_MTU 4096 - -#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0) -#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1) - -#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4) -#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1) - -#define SHM_FULL_MASK (0x0F << 0) -#define SHM_EMPTY_MASK (0x0F << 4) - -struct shm_pck_desc { - /* - * Offset from start of shared memory area to start of - * shared memory CAIF frame. - */ - u32 frm_ofs; - u32 frm_len; -}; - -struct buf_list { - unsigned char *desc_vptr; - u32 phy_addr; - u32 index; - u32 len; - u32 frames; - u32 frm_ofs; - struct list_head list; -}; - -struct shm_caif_frm { - /* Number of bytes of padding before the CAIF frame. 
*/ - u8 hdr_ofs; -}; - -struct shmdrv_layer { - /* caif_dev_common must always be first in the structure*/ - struct caif_dev_common cfdev; - - u32 shm_tx_addr; - u32 shm_rx_addr; - u32 shm_base_addr; - u32 tx_empty_available; - spinlock_t lock; - - struct list_head tx_empty_list; - struct list_head tx_pend_list; - struct list_head tx_full_list; - struct list_head rx_empty_list; - struct list_head rx_pend_list; - struct list_head rx_full_list; - - struct workqueue_struct *pshm_tx_workqueue; - struct workqueue_struct *pshm_rx_workqueue; - - struct work_struct shm_tx_work; - struct work_struct shm_rx_work; - - struct sk_buff_head sk_qhead; - struct shmdev_layer *pshm_dev; -}; - -static int shm_netdev_open(struct net_device *shm_netdev) -{ - netif_wake_queue(shm_netdev); - return 0; -} - -static int shm_netdev_close(struct net_device *shm_netdev) -{ - netif_stop_queue(shm_netdev); - return 0; -} - -int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv) -{ - struct buf_list *pbuf; - struct shmdrv_layer *pshm_drv; - struct list_head *pos; - u32 avail_emptybuff = 0; - unsigned long flags = 0; - - pshm_drv = priv; - - /* Check for received buffers. */ - if (mbx_msg & SHM_FULL_MASK) { - int idx; - - spin_lock_irqsave(&pshm_drv->lock, flags); - - /* Check whether we have any outstanding buffers. */ - if (list_empty(&pshm_drv->rx_empty_list)) { - - /* Release spin lock. */ - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - /* We print even in IRQ context... */ - pr_warn("No empty Rx buffers to fill: " - "mbx_msg:%x\n", mbx_msg); - - /* Bail out. */ - goto err_sync; - } - - pbuf = - list_entry(pshm_drv->rx_empty_list.next, - struct buf_list, list); - idx = pbuf->index; - - /* Check buffer synchronization. */ - if (idx != SHM_GET_FULL(mbx_msg)) { - - /* We print even in IRQ context... */ - pr_warn( - "phyif_shm_mbx_msg_cb: RX full out of sync:" - " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n", - idx, mbx_msg, SHM_GET_FULL(mbx_msg)); - - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - /* Bail out. */ - goto err_sync; - } - - list_del_init(&pbuf->list); - list_add_tail(&pbuf->list, &pshm_drv->rx_full_list); - - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - /* Schedule RX work queue. */ - if (!work_pending(&pshm_drv->shm_rx_work)) - queue_work(pshm_drv->pshm_rx_workqueue, - &pshm_drv->shm_rx_work); - } - - /* Check for emptied buffers. */ - if (mbx_msg & SHM_EMPTY_MASK) { - int idx; - - spin_lock_irqsave(&pshm_drv->lock, flags); - - /* Check whether we have any outstanding buffers. */ - if (list_empty(&pshm_drv->tx_full_list)) { - - /* We print even in IRQ context... */ - pr_warn("No TX to empty: msg:%x\n", mbx_msg); - - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - /* Bail out. */ - goto err_sync; - } - - pbuf = - list_entry(pshm_drv->tx_full_list.next, - struct buf_list, list); - idx = pbuf->index; - - /* Check buffer synchronization. */ - if (idx != SHM_GET_EMPTY(mbx_msg)) { - - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - /* We print even in IRQ context... */ - pr_warn("TX empty " - "out of sync:idx:%d, msg:%x\n", idx, mbx_msg); - - /* Bail out. */ - goto err_sync; - } - list_del_init(&pbuf->list); - - /* Reset buffer parameters. */ - pbuf->frames = 0; - pbuf->frm_ofs = SHM_CAIF_FRM_OFS; - - list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list); - - /* Check the available no. of buffers in the empty list */ - list_for_each(pos, &pshm_drv->tx_empty_list) - avail_emptybuff++; - - /* Check whether we have to wake up the transmitter. 
*/ - if ((avail_emptybuff > HIGH_WATERMARK) && - (!pshm_drv->tx_empty_available)) { - pshm_drv->tx_empty_available = 1; - spin_unlock_irqrestore(&pshm_drv->lock, flags); - pshm_drv->cfdev.flowctrl - (pshm_drv->pshm_dev->pshm_netdev, - CAIF_FLOW_ON); - - - /* Schedule the work queue. if required */ - if (!work_pending(&pshm_drv->shm_tx_work)) - queue_work(pshm_drv->pshm_tx_workqueue, - &pshm_drv->shm_tx_work); - } else - spin_unlock_irqrestore(&pshm_drv->lock, flags); - } - - return 0; - -err_sync: - return -EIO; -} - -static void shm_rx_work_func(struct work_struct *rx_work) -{ - struct shmdrv_layer *pshm_drv; - struct buf_list *pbuf; - unsigned long flags = 0; - struct sk_buff *skb; - char *p; - int ret; - - pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work); - - while (1) { - - struct shm_pck_desc *pck_desc; - - spin_lock_irqsave(&pshm_drv->lock, flags); - - /* Check for received buffers. */ - if (list_empty(&pshm_drv->rx_full_list)) { - spin_unlock_irqrestore(&pshm_drv->lock, flags); - break; - } - - pbuf = - list_entry(pshm_drv->rx_full_list.next, struct buf_list, - list); - list_del_init(&pbuf->list); - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - /* Retrieve pointer to start of the packet descriptor area. */ - pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr; - - /* - * Check whether descriptor contains a CAIF shared memory - * frame. - */ - while (pck_desc->frm_ofs) { - unsigned int frm_buf_ofs; - unsigned int frm_pck_ofs; - unsigned int frm_pck_len; - /* - * Check whether offset is within buffer limits - * (lower). - */ - if (pck_desc->frm_ofs < - (pbuf->phy_addr - pshm_drv->shm_base_addr)) - break; - /* - * Check whether offset is within buffer limits - * (higher). - */ - if (pck_desc->frm_ofs > - ((pbuf->phy_addr - pshm_drv->shm_base_addr) + - pbuf->len)) - break; - - /* Calculate offset from start of buffer. */ - frm_buf_ofs = - pck_desc->frm_ofs - (pbuf->phy_addr - - pshm_drv->shm_base_addr); - - /* - * Calculate offset and length of CAIF packet while - * taking care of the shared memory header. - */ - frm_pck_ofs = - frm_buf_ofs + SHM_HDR_LEN + - (*(pbuf->desc_vptr + frm_buf_ofs)); - frm_pck_len = - (pck_desc->frm_len - SHM_HDR_LEN - - (*(pbuf->desc_vptr + frm_buf_ofs))); - - /* Check whether CAIF packet is within buffer limits */ - if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len) - break; - - /* Get a suitable CAIF packet and copy in data. */ - skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev, - frm_pck_len + 1); - - if (skb == NULL) { - pr_info("OOM: Try next frame in descriptor\n"); - break; - } - - p = skb_put(skb, frm_pck_len); - memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = pshm_drv->pshm_dev->pshm_netdev; - - /* Push received packet up the stack. */ - ret = netif_rx_ni(skb); - - if (!ret) { - pshm_drv->pshm_dev->pshm_netdev->stats. - rx_packets++; - pshm_drv->pshm_dev->pshm_netdev->stats. - rx_bytes += pck_desc->frm_len; - } else - ++pshm_drv->pshm_dev->pshm_netdev->stats. - rx_dropped; - /* Move to next packet descriptor. */ - pck_desc++; - } - - spin_lock_irqsave(&pshm_drv->lock, flags); - list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list); - - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - } - - /* Schedule the work queue. 
if required */ - if (!work_pending(&pshm_drv->shm_tx_work)) - queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work); - -} - -static void shm_tx_work_func(struct work_struct *tx_work) -{ - u32 mbox_msg; - unsigned int frmlen, avail_emptybuff, append = 0; - unsigned long flags = 0; - struct buf_list *pbuf = NULL; - struct shmdrv_layer *pshm_drv; - struct shm_caif_frm *frm; - struct sk_buff *skb; - struct shm_pck_desc *pck_desc; - struct list_head *pos; - - pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work); - - do { - /* Initialize mailbox message. */ - mbox_msg = 0x00; - avail_emptybuff = 0; - - spin_lock_irqsave(&pshm_drv->lock, flags); - - /* Check for pending receive buffers. */ - if (!list_empty(&pshm_drv->rx_pend_list)) { - - pbuf = list_entry(pshm_drv->rx_pend_list.next, - struct buf_list, list); - - list_del_init(&pbuf->list); - list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list); - /* - * Value index is never changed, - * so read access should be safe. - */ - mbox_msg |= SHM_SET_EMPTY(pbuf->index); - } - - skb = skb_peek(&pshm_drv->sk_qhead); - - if (skb == NULL) - goto send_msg; - /* Check the available no. of buffers in the empty list */ - list_for_each(pos, &pshm_drv->tx_empty_list) - avail_emptybuff++; - - if ((avail_emptybuff < LOW_WATERMARK) && - pshm_drv->tx_empty_available) { - /* Update blocking condition. */ - pshm_drv->tx_empty_available = 0; - spin_unlock_irqrestore(&pshm_drv->lock, flags); - pshm_drv->cfdev.flowctrl - (pshm_drv->pshm_dev->pshm_netdev, - CAIF_FLOW_OFF); - spin_lock_irqsave(&pshm_drv->lock, flags); - } - /* - * We simply return back to the caller if we do not have space - * either in Tx pending list or Tx empty list. In this case, - * we hold the received skb in the skb list, waiting to - * be transmitted once Tx buffers become available - */ - if (list_empty(&pshm_drv->tx_empty_list)) - goto send_msg; - - /* Get the first free Tx buffer. */ - pbuf = list_entry(pshm_drv->tx_empty_list.next, - struct buf_list, list); - do { - if (append) { - skb = skb_peek(&pshm_drv->sk_qhead); - if (skb == NULL) - break; - } - - frm = (struct shm_caif_frm *) - (pbuf->desc_vptr + pbuf->frm_ofs); - - frm->hdr_ofs = 0; - frmlen = 0; - frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len; - - /* Add tail padding if needed. */ - if (frmlen % SHM_FRM_PAD_LEN) - frmlen += SHM_FRM_PAD_LEN - - (frmlen % SHM_FRM_PAD_LEN); - - /* - * Verify that packet, header and additional padding - * can fit within the buffer frame area. - */ - if (frmlen >= (pbuf->len - pbuf->frm_ofs)) - break; - - if (!append) { - list_del_init(&pbuf->list); - append = 1; - } - - skb = skb_dequeue(&pshm_drv->sk_qhead); - if (skb == NULL) - break; - /* Copy in CAIF frame. */ - skb_copy_bits(skb, 0, pbuf->desc_vptr + - pbuf->frm_ofs + SHM_HDR_LEN + - frm->hdr_ofs, skb->len); - - pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++; - pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes += - frmlen; - dev_kfree_skb_irq(skb); - - /* Fill in the shared memory packet descriptor area. */ - pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr); - /* Forward to current frame. */ - pck_desc += pbuf->frames; - pck_desc->frm_ofs = (pbuf->phy_addr - - pshm_drv->shm_base_addr) + - pbuf->frm_ofs; - pck_desc->frm_len = frmlen; - /* Terminate packet descriptor area. */ - pck_desc++; - pck_desc->frm_ofs = 0; - /* Update buffer parameters. */ - pbuf->frames++; - pbuf->frm_ofs += frmlen + (frmlen % 32); - - } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF); - - /* Assign buffer as full. 
*/ - list_add_tail(&pbuf->list, &pshm_drv->tx_full_list); - append = 0; - mbox_msg |= SHM_SET_FULL(pbuf->index); -send_msg: - spin_unlock_irqrestore(&pshm_drv->lock, flags); - - if (mbox_msg) - pshm_drv->pshm_dev->pshmdev_mbxsend - (pshm_drv->pshm_dev->shm_id, mbox_msg); - } while (mbox_msg); -} - -static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev) -{ - struct shmdrv_layer *pshm_drv; - - pshm_drv = netdev_priv(shm_netdev); - - skb_queue_tail(&pshm_drv->sk_qhead, skb); - - /* Schedule Tx work queue. for deferred processing of skbs*/ - if (!work_pending(&pshm_drv->shm_tx_work)) - queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work); - - return 0; -} - -static const struct net_device_ops netdev_ops = { - .ndo_open = shm_netdev_open, - .ndo_stop = shm_netdev_close, - .ndo_start_xmit = shm_netdev_tx, -}; - -static void shm_netdev_setup(struct net_device *pshm_netdev) -{ - struct shmdrv_layer *pshm_drv; - pshm_netdev->netdev_ops = &netdev_ops; - - pshm_netdev->mtu = CAIF_MAX_MTU; - pshm_netdev->type = ARPHRD_CAIF; - pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM; - pshm_netdev->tx_queue_len = 0; - pshm_netdev->destructor = free_netdev; - - pshm_drv = netdev_priv(pshm_netdev); - - /* Initialize structures in a clean state. */ - memset(pshm_drv, 0, sizeof(struct shmdrv_layer)); - - pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY; -} - -int caif_shmcore_probe(struct shmdev_layer *pshm_dev) -{ - int result, j; - struct shmdrv_layer *pshm_drv = NULL; - - pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer), - "cfshm%d", shm_netdev_setup); - if (!pshm_dev->pshm_netdev) - return -ENOMEM; - - pshm_drv = netdev_priv(pshm_dev->pshm_netdev); - pshm_drv->pshm_dev = pshm_dev; - - /* - * Initialization starts with the verification of the - * availability of MBX driver by calling its setup function. - * MBX driver must be available by this time for proper - * functioning of SHM driver. - */ - if ((pshm_dev->pshmdev_mbxsetup - (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) { - pr_warn("Could not config. SHM Mailbox," - " Bailing out.....\n"); - free_netdev(pshm_dev->pshm_netdev); - return -ENODEV; - } - - skb_queue_head_init(&pshm_drv->sk_qhead); - - pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER" - " INSTANCE AT pshm_drv =0x%p\n", - pshm_drv->pshm_dev->shm_id, pshm_drv); - - if (pshm_dev->shm_total_sz < - (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) { - - pr_warn("ERROR, Amount of available" - " Phys. 
SHM cannot accommodate current SHM " - "driver configuration, Bailing out ...\n"); - free_netdev(pshm_dev->pshm_netdev); - return -ENOMEM; - } - - pshm_drv->shm_base_addr = pshm_dev->shm_base_addr; - pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr; - - if (pshm_dev->shm_loopback) - pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr; - else - pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr + - (NR_TX_BUF * TX_BUF_SZ); - - spin_lock_init(&pshm_drv->lock); - INIT_LIST_HEAD(&pshm_drv->tx_empty_list); - INIT_LIST_HEAD(&pshm_drv->tx_pend_list); - INIT_LIST_HEAD(&pshm_drv->tx_full_list); - - INIT_LIST_HEAD(&pshm_drv->rx_empty_list); - INIT_LIST_HEAD(&pshm_drv->rx_pend_list); - INIT_LIST_HEAD(&pshm_drv->rx_full_list); - - INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func); - INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func); - - pshm_drv->pshm_tx_workqueue = - create_singlethread_workqueue("shm_tx_work"); - pshm_drv->pshm_rx_workqueue = - create_singlethread_workqueue("shm_rx_work"); - - for (j = 0; j < NR_TX_BUF; j++) { - struct buf_list *tx_buf = - kmalloc(sizeof(struct buf_list), GFP_KERNEL); - - if (tx_buf == NULL) { - pr_warn("ERROR, Could not" - " allocate dynamic mem. for tx_buf," - " Bailing out ...\n"); - free_netdev(pshm_dev->pshm_netdev); - return -ENOMEM; - } - tx_buf->index = j; - tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j); - tx_buf->len = TX_BUF_SZ; - tx_buf->frames = 0; - tx_buf->frm_ofs = SHM_CAIF_FRM_OFS; - - if (pshm_dev->shm_loopback) - tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; - else - /* - * FIXME: the result of ioremap is not a pointer - arnd - */ - tx_buf->desc_vptr = - ioremap(tx_buf->phy_addr, TX_BUF_SZ); - - list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list); - } - - for (j = 0; j < NR_RX_BUF; j++) { - struct buf_list *rx_buf = - kmalloc(sizeof(struct buf_list), GFP_KERNEL); - - if (rx_buf == NULL) { - pr_warn("ERROR, Could not" - " allocate dynamic mem.for rx_buf," - " Bailing out ...\n"); - free_netdev(pshm_dev->pshm_netdev); - return -ENOMEM; - } - rx_buf->index = j; - rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j); - rx_buf->len = RX_BUF_SZ; - - if (pshm_dev->shm_loopback) - rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr; - else - rx_buf->desc_vptr = - ioremap(rx_buf->phy_addr, RX_BUF_SZ); - list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list); - } - - pshm_drv->tx_empty_available = 1; - result = register_netdev(pshm_dev->pshm_netdev); - if (result) - pr_warn("ERROR[%d], SHM could not, " - "register with NW FRMWK Bailing out ...\n", result); - - return result; -} - -void caif_shmcore_remove(struct net_device *pshm_netdev) -{ - struct buf_list *pbuf; - struct shmdrv_layer *pshm_drv = NULL; - - pshm_drv = netdev_priv(pshm_netdev); - - while (!(list_empty(&pshm_drv->tx_pend_list))) { - pbuf = - list_entry(pshm_drv->tx_pend_list.next, - struct buf_list, list); - - list_del(&pbuf->list); - kfree(pbuf); - } - - while (!(list_empty(&pshm_drv->tx_full_list))) { - pbuf = - list_entry(pshm_drv->tx_full_list.next, - struct buf_list, list); - list_del(&pbuf->list); - kfree(pbuf); - } - - while (!(list_empty(&pshm_drv->tx_empty_list))) { - pbuf = - list_entry(pshm_drv->tx_empty_list.next, - struct buf_list, list); - list_del(&pbuf->list); - kfree(pbuf); - } - - while (!(list_empty(&pshm_drv->rx_full_list))) { - pbuf = - list_entry(pshm_drv->tx_full_list.next, - struct buf_list, list); - list_del(&pbuf->list); - kfree(pbuf); - } - - while (!(list_empty(&pshm_drv->rx_pend_list))) { - pbuf = - list_entry(pshm_drv->tx_pend_list.next, 
- struct buf_list, list); - list_del(&pbuf->list); - kfree(pbuf); - } - - while (!(list_empty(&pshm_drv->rx_empty_list))) { - pbuf = - list_entry(pshm_drv->rx_empty_list.next, - struct buf_list, list); - list_del(&pbuf->list); - kfree(pbuf); - } - - /* Destroy work queues. */ - destroy_workqueue(pshm_drv->pshm_tx_workqueue); - destroy_workqueue(pshm_drv->pshm_rx_workqueue); - - unregister_netdev(pshm_netdev); -} diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index b71ce9bf0af..ff54c0eb205 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -1,7 +1,6 @@ /* * Copyright (C) ST-Ericsson AB 2010 - * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com - * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * Author: Daniel Martensson * License terms: GNU General Public License (GPL) version 2. */ @@ -29,7 +28,7 @@ #endif /* CONFIG_CAIF_SPI_SYNC */ MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); +MODULE_AUTHOR("Daniel Martensson"); MODULE_DESCRIPTION("CAIF SPI driver"); /* Returns the number of padding bytes for alignment. */ @@ -555,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) skb->protocol = htons(ETH_P_CAIF); skb_reset_mac_header(skb); - skb->dev = cfspi->ndev; /* * Push received packet up the stack. @@ -864,6 +862,7 @@ static int __init cfspi_init_module(void) driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_up_head_align); err_create_up_head_align: + platform_driver_unregister(&cfspi_spi_driver); err_dev_register: return result; } diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c index e139e133fc7..39ba2f892ad 100644 --- a/drivers/net/caif/caif_spi_slave.c +++ b/drivers/net/caif/caif_spi_slave.c @@ -1,10 +1,8 @@ /* * Copyright (C) ST-Ericsson AB 2010 - * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com - * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * Author: Daniel Martensson * License terms: GNU General Public License (GPL) version 2. 
*/ -#include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c new file mode 100644 index 00000000000..985608634f8 --- /dev/null +++ b/drivers/net/caif/caif_virtio.c @@ -0,0 +1,791 @@ +/* + * Copyright (C) ST-Ericsson AB 2013 + * Authors: Vicram Arv + * Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ +#include <linux/module.h> +#include <linux/if_arp.h> +#include <linux/virtio.h> +#include <linux/vringh.h> +#include <linux/debugfs.h> +#include <linux/spinlock.h> +#include <linux/genalloc.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_caif.h> +#include <linux/virtio_ring.h> +#include <linux/dma-mapping.h> +#include <net/caif/caif_dev.h> +#include <linux/virtio_config.h> + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Vicram Arv"); +MODULE_AUTHOR("Sjur Brendeland"); +MODULE_DESCRIPTION("Virtio CAIF Driver"); + +/* NAPI schedule quota */ +#define CFV_DEFAULT_QUOTA 32 + +/* Defaults used if virtio config space is unavailable */ +#define CFV_DEF_MTU_SIZE 4096 +#define CFV_DEF_HEADROOM 32 +#define CFV_DEF_TAILROOM 32 + +/* Required IP header alignment */ +#define IP_HDR_ALIGN 4 + +/* struct cfv_napi_contxt - NAPI context info + * @riov: IOV holding data read from the ring. Note that riov may + * still hold data when cfv_rx_poll() returns. + * @head: Last descriptor ID we received from vringh_getdesc_kern. + * We use this to put descriptor back on the used ring. USHRT_MAX is + * used to indicate invalid head-id. + */ +struct cfv_napi_context { + struct vringh_kiov riov; + unsigned short head; +}; + +/* struct cfv_stats - statistics for debugfs + * @rx_napi_complete: Number of NAPI completions (RX) + * @rx_napi_resched: Number of calls where the full quota was used (RX) + * @rx_nomem: Number of SKB alloc failures (RX) + * @rx_kicks: Number of RX kicks + * @tx_full_ring: Number times TX ring was full + * @tx_no_mem: Number of times TX went out of memory + * @tx_flow_on: Number of flow on (TX) + * @tx_kicks: Number of TX kicks + */ +struct cfv_stats { + u32 rx_napi_complete; + u32 rx_napi_resched; + u32 rx_nomem; + u32 rx_kicks; + u32 tx_full_ring; + u32 tx_no_mem; + u32 tx_flow_on; + u32 tx_kicks; +}; + +/* struct cfv_info - Caif Virtio control structure + * @cfdev: caif common header + * @vdev: Associated virtio device + * @vr_rx: rx/downlink host vring + * @vq_tx: tx/uplink virtqueue + * @ndev: CAIF link layer device + * @watermark_tx: indicates number of free descriptors we need + * to reopen the tx-queues after overload. 
+ * @tx_lock: protects vq_tx from concurrent use + * @tx_release_tasklet: Tasklet for freeing consumed TX buffers + * @napi: Napi context used in cfv_rx_poll() + * @ctx: Context data used in cfv_rx_poll() + * @tx_hr: transmit headroom + * @rx_hr: receive headroom + * @tx_tr: transmit tail room + * @rx_tr: receive tail room + * @mtu: transmit max size + * @mru: receive max size + * @allocsz: size of dma memory reserved for TX buffers + * @alloc_addr: virtual address to dma memory for TX buffers + * @alloc_dma: dma address to dma memory for TX buffers + * @genpool: Gen Pool used for allocating TX buffers + * @reserved_mem: Pointer to memory reserve allocated from genpool + * @reserved_size: Size of memory reserve allocated from genpool + * @stats: Statistics exposed in sysfs + * @debugfs: Debugfs dentry for statistic counters + */ +struct cfv_info { + struct caif_dev_common cfdev; + struct virtio_device *vdev; + struct vringh *vr_rx; + struct virtqueue *vq_tx; + struct net_device *ndev; + unsigned int watermark_tx; + /* Protect access to vq_tx */ + spinlock_t tx_lock; + struct tasklet_struct tx_release_tasklet; + struct napi_struct napi; + struct cfv_napi_context ctx; + u16 tx_hr; + u16 rx_hr; + u16 tx_tr; + u16 rx_tr; + u32 mtu; + u32 mru; + size_t allocsz; + void *alloc_addr; + dma_addr_t alloc_dma; + struct gen_pool *genpool; + unsigned long reserved_mem; + size_t reserved_size; + struct cfv_stats stats; + struct dentry *debugfs; +}; + +/* struct buf_info - maintains transmit buffer data handle + * @size: size of transmit buffer + * @dma_handle: handle to allocated dma device memory area + * @vaddr: virtual address mapping to allocated memory area + */ +struct buf_info { + size_t size; + u8 *vaddr; +}; + +/* Called from virtio device, in IRQ context */ +static void cfv_release_cb(struct virtqueue *vq_tx) +{ + struct cfv_info *cfv = vq_tx->vdev->priv; + + ++cfv->stats.tx_kicks; + tasklet_schedule(&cfv->tx_release_tasklet); +} + +static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info) +{ + if (!buf_info) + return; + gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr, + buf_info->size); + kfree(buf_info); +} + +/* This is invoked whenever the remote processor completed processing + * a TX msg we just sent, and the buffer is put back to the used ring. + */ +static void cfv_release_used_buf(struct virtqueue *vq_tx) +{ + struct cfv_info *cfv = vq_tx->vdev->priv; + unsigned long flags; + + BUG_ON(vq_tx != cfv->vq_tx); + + for (;;) { + unsigned int len; + struct buf_info *buf_info; + + /* Get used buffer from used ring to recycle used descriptors */ + spin_lock_irqsave(&cfv->tx_lock, flags); + buf_info = virtqueue_get_buf(vq_tx, &len); + spin_unlock_irqrestore(&cfv->tx_lock, flags); + + /* Stop looping if there are no more buffers to free */ + if (!buf_info) + break; + + free_buf_info(cfv, buf_info); + + /* watermark_tx indicates if we previously stopped the tx + * queues. If we have enough free stots in the virtio ring, + * re-establish memory reserved and open up tx queues. + */ + if (cfv->vq_tx->num_free <= cfv->watermark_tx) + continue; + + /* Re-establish memory reserve */ + if (cfv->reserved_mem == 0 && cfv->genpool) + cfv->reserved_mem = + gen_pool_alloc(cfv->genpool, + cfv->reserved_size); + + /* Open up the tx queues */ + if (cfv->reserved_mem) { + cfv->watermark_tx = + virtqueue_get_vring_size(cfv->vq_tx); + netif_tx_wake_all_queues(cfv->ndev); + /* Buffers are recycled in cfv_netdev_tx, so + * disable notifications when queues are opened. 
+ */ + virtqueue_disable_cb(cfv->vq_tx); + ++cfv->stats.tx_flow_on; + } else { + /* if no memory reserve, wait for more free slots */ + WARN_ON(cfv->watermark_tx > + virtqueue_get_vring_size(cfv->vq_tx)); + cfv->watermark_tx += + virtqueue_get_vring_size(cfv->vq_tx) / 4; + } + } +} + +/* Allocate a SKB and copy packet data to it */ +static struct sk_buff *cfv_alloc_and_copy_skb(int *err, + struct cfv_info *cfv, + u8 *frm, u32 frm_len) +{ + struct sk_buff *skb; + u32 cfpkt_len, pad_len; + + *err = 0; + /* Verify that packet size with down-link header and mtu size */ + if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) { + netdev_err(cfv->ndev, + "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n", + frm_len, cfv->mru, cfv->rx_hr, + cfv->rx_tr); + *err = -EPROTO; + return NULL; + } + + cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr); + pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1); + + skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len); + if (!skb) { + *err = -ENOMEM; + return NULL; + } + + skb_reserve(skb, cfv->rx_hr + pad_len); + + memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len); + return skb; +} + +/* Get packets from the host vring */ +static int cfv_rx_poll(struct napi_struct *napi, int quota) +{ + struct cfv_info *cfv = container_of(napi, struct cfv_info, napi); + int rxcnt = 0; + int err = 0; + void *buf; + struct sk_buff *skb; + struct vringh_kiov *riov = &cfv->ctx.riov; + unsigned int skb_len; + +again: + do { + skb = NULL; + + /* Put the previous iovec back on the used ring and + * fetch a new iovec if we have processed all elements. + */ + if (riov->i == riov->used) { + if (cfv->ctx.head != USHRT_MAX) { + vringh_complete_kern(cfv->vr_rx, + cfv->ctx.head, + 0); + cfv->ctx.head = USHRT_MAX; + } + + err = vringh_getdesc_kern( + cfv->vr_rx, + riov, + NULL, + &cfv->ctx.head, + GFP_ATOMIC); + + if (err <= 0) + goto exit; + } + + buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base); + /* TODO: Add check on valid buffer address */ + + skb = cfv_alloc_and_copy_skb(&err, cfv, buf, + riov->iov[riov->i].iov_len); + if (unlikely(err)) + goto exit; + + /* Push received packet up the stack. */ + skb_len = skb->len; + skb->protocol = htons(ETH_P_CAIF); + skb_reset_mac_header(skb); + skb->dev = cfv->ndev; + err = netif_receive_skb(skb); + if (unlikely(err)) { + ++cfv->ndev->stats.rx_dropped; + } else { + ++cfv->ndev->stats.rx_packets; + cfv->ndev->stats.rx_bytes += skb_len; + } + + ++riov->i; + ++rxcnt; + } while (rxcnt < quota); + + ++cfv->stats.rx_napi_resched; + goto out; + +exit: + switch (err) { + case 0: + ++cfv->stats.rx_napi_complete; + + /* Really out of patckets? 
(stolen from virtio_net)*/ + napi_complete(napi); + if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) && + napi_schedule_prep(napi)) { + vringh_notify_disable_kern(cfv->vr_rx); + __napi_schedule(napi); + goto again; + } + break; + + case -ENOMEM: + ++cfv->stats.rx_nomem; + dev_kfree_skb(skb); + /* Stop NAPI poll on OOM, we hope to be polled later */ + napi_complete(napi); + vringh_notify_enable_kern(cfv->vr_rx); + break; + + default: + /* We're doomed, any modem fault is fatal */ + netdev_warn(cfv->ndev, "Bad ring, disable device\n"); + cfv->ndev->stats.rx_dropped = riov->used - riov->i; + napi_complete(napi); + vringh_notify_disable_kern(cfv->vr_rx); + netif_carrier_off(cfv->ndev); + break; + } +out: + if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0) + vringh_notify(cfv->vr_rx); + return rxcnt; +} + +static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx) +{ + struct cfv_info *cfv = vdev->priv; + + ++cfv->stats.rx_kicks; + vringh_notify_disable_kern(cfv->vr_rx); + napi_schedule(&cfv->napi); +} + +static void cfv_destroy_genpool(struct cfv_info *cfv) +{ + if (cfv->alloc_addr) + dma_free_coherent(cfv->vdev->dev.parent->parent, + cfv->allocsz, cfv->alloc_addr, + cfv->alloc_dma); + + if (!cfv->genpool) + return; + gen_pool_free(cfv->genpool, cfv->reserved_mem, + cfv->reserved_size); + gen_pool_destroy(cfv->genpool); + cfv->genpool = NULL; +} + +static int cfv_create_genpool(struct cfv_info *cfv) +{ + int err; + + /* dma_alloc can only allocate whole pages, and we need a more + * fine graned allocation so we use genpool. We ask for space needed + * by IP and a full ring. If the dma allcoation fails we retry with a + * smaller allocation size. + */ + err = -ENOMEM; + cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) * + (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10; + if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu) + return -EINVAL; + + for (;;) { + if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) { + netdev_info(cfv->ndev, "Not enough device memory\n"); + return -ENOMEM; + } + + cfv->alloc_addr = dma_alloc_coherent( + cfv->vdev->dev.parent->parent, + cfv->allocsz, &cfv->alloc_dma, + GFP_ATOMIC); + if (cfv->alloc_addr) + break; + + cfv->allocsz = (cfv->allocsz * 3) >> 2; + } + + netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n", + cfv->allocsz); + + /* Allocate on 128 bytes boundaries (1 << 7)*/ + cfv->genpool = gen_pool_create(7, -1); + if (!cfv->genpool) + goto err; + + err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr, + (phys_addr_t)virt_to_phys(cfv->alloc_addr), + cfv->allocsz, -1); + if (err) + goto err; + + /* Reserve some memory for low memory situations. If we hit the roof + * in the memory pool, we stop TX flow and release the reserve. 
+ */ + cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu; + cfv->reserved_mem = gen_pool_alloc(cfv->genpool, + cfv->reserved_size); + if (!cfv->reserved_mem) { + err = -ENOMEM; + goto err; + } + + cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx); + return 0; +err: + cfv_destroy_genpool(cfv); + return err; +} + +/* Enable the CAIF interface and allocate the memory-pool */ +static int cfv_netdev_open(struct net_device *netdev) +{ + struct cfv_info *cfv = netdev_priv(netdev); + + if (cfv_create_genpool(cfv)) + return -ENOMEM; + + netif_carrier_on(netdev); + napi_enable(&cfv->napi); + + /* Schedule NAPI to read any pending packets */ + napi_schedule(&cfv->napi); + return 0; +} + +/* Disable the CAIF interface and free the memory-pool */ +static int cfv_netdev_close(struct net_device *netdev) +{ + struct cfv_info *cfv = netdev_priv(netdev); + unsigned long flags; + struct buf_info *buf_info; + + /* Disable interrupts, queues and NAPI polling */ + netif_carrier_off(netdev); + virtqueue_disable_cb(cfv->vq_tx); + vringh_notify_disable_kern(cfv->vr_rx); + napi_disable(&cfv->napi); + + /* Release any TX buffers on both used and avilable rings */ + cfv_release_used_buf(cfv->vq_tx); + spin_lock_irqsave(&cfv->tx_lock, flags); + while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx))) + free_buf_info(cfv, buf_info); + spin_unlock_irqrestore(&cfv->tx_lock, flags); + + /* Release all dma allocated memory and destroy the pool */ + cfv_destroy_genpool(cfv); + return 0; +} + +/* Allocate a buffer in dma-memory and copy skb to it */ +static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv, + struct sk_buff *skb, + struct scatterlist *sg) +{ + struct caif_payload_info *info = (void *)&skb->cb; + struct buf_info *buf_info = NULL; + u8 pad_len, hdr_ofs; + + if (!cfv->genpool) + goto err; + + if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) { + netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n", + cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu); + goto err; + } + + buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC); + if (unlikely(!buf_info)) + goto err; + + /* Make the IP header aligned in tbe buffer */ + hdr_ofs = cfv->tx_hr + info->hdr_len; + pad_len = hdr_ofs & (IP_HDR_ALIGN - 1); + buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len; + + /* allocate dma memory buffer */ + buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size); + if (unlikely(!buf_info->vaddr)) + goto err; + + /* copy skbuf contents to send buffer */ + skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len); + sg_init_one(sg, buf_info->vaddr + pad_len, + skb->len + cfv->tx_hr + cfv->rx_hr); + + return buf_info; +err: + kfree(buf_info); + return NULL; +} + +/* Put the CAIF packet on the virtio ring and kick the receiver */ +static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) +{ + struct cfv_info *cfv = netdev_priv(netdev); + struct buf_info *buf_info; + struct scatterlist sg; + unsigned long flags; + bool flow_off = false; + int ret; + + /* garbage collect released buffers */ + cfv_release_used_buf(cfv->vq_tx); + spin_lock_irqsave(&cfv->tx_lock, flags); + + /* Flow-off check takes into account number of cpus to make sure + * virtqueue will not be overfilled in any possible smp conditions. 
+ * + * Flow-on is triggered when sufficient buffers are freed + */ + if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) { + flow_off = true; + cfv->stats.tx_full_ring++; + } + + /* If we run out of memory, we release the memory reserve and retry + * allocation. + */ + buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg); + if (unlikely(!buf_info)) { + cfv->stats.tx_no_mem++; + flow_off = true; + + if (cfv->reserved_mem && cfv->genpool) { + gen_pool_free(cfv->genpool, cfv->reserved_mem, + cfv->reserved_size); + cfv->reserved_mem = 0; + buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg); + } + } + + if (unlikely(flow_off)) { + /* Turn flow on when a 1/4 of the descriptors are released */ + cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4; + /* Enable notifications of recycled TX buffers */ + virtqueue_enable_cb(cfv->vq_tx); + netif_tx_stop_all_queues(netdev); + } + + if (unlikely(!buf_info)) { + /* If the memory reserve does it's job, this shouldn't happen */ + netdev_warn(cfv->ndev, "Out of gen_pool memory\n"); + goto err; + } + + ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC); + if (unlikely((ret < 0))) { + /* If flow control works, this shouldn't happen */ + netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n", + ret); + goto err; + } + + /* update netdev statistics */ + cfv->ndev->stats.tx_packets++; + cfv->ndev->stats.tx_bytes += skb->len; + spin_unlock_irqrestore(&cfv->tx_lock, flags); + + /* tell the remote processor it has a pending message to read */ + virtqueue_kick(cfv->vq_tx); + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +err: + spin_unlock_irqrestore(&cfv->tx_lock, flags); + cfv->ndev->stats.tx_dropped++; + free_buf_info(cfv, buf_info); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void cfv_tx_release_tasklet(unsigned long drv) +{ + struct cfv_info *cfv = (struct cfv_info *)drv; + cfv_release_used_buf(cfv->vq_tx); +} + +static const struct net_device_ops cfv_netdev_ops = { + .ndo_open = cfv_netdev_open, + .ndo_stop = cfv_netdev_close, + .ndo_start_xmit = cfv_netdev_tx, +}; + +static void cfv_netdev_setup(struct net_device *netdev) +{ + netdev->netdev_ops = &cfv_netdev_ops; + netdev->type = ARPHRD_CAIF; + netdev->tx_queue_len = 100; + netdev->flags = IFF_POINTOPOINT | IFF_NOARP; + netdev->mtu = CFV_DEF_MTU_SIZE; + netdev->destructor = free_netdev; +} + +/* Create debugfs counters for the device */ +static inline void debugfs_init(struct cfv_info *cfv) +{ + cfv->debugfs = + debugfs_create_dir(netdev_name(cfv->ndev), NULL); + + if (IS_ERR(cfv->debugfs)) + return; + + debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_napi_complete); + debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_napi_resched); + debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_nomem); + debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_kicks); + debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_full_ring); + debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_no_mem); + debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_kicks); + debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_flow_on); +} + +/* Setup CAIF for the a virtio device */ +static int cfv_probe(struct virtio_device *vdev) +{ + vq_callback_t *vq_cbs = cfv_release_cb; + vrh_callback_t *vrh_cbs = cfv_recv; + const char *names = "output"; + const char *cfv_netdev_name = "cfvrt"; + struct net_device 
*netdev; + struct cfv_info *cfv; + int err = -EINVAL; + + netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name, + cfv_netdev_setup); + if (!netdev) + return -ENOMEM; + + cfv = netdev_priv(netdev); + cfv->vdev = vdev; + cfv->ndev = netdev; + + spin_lock_init(&cfv->tx_lock); + + /* Get the RX virtio ring. This is a "host side vring". */ + err = -ENODEV; + if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs) + goto err; + + err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs); + if (err) + goto err; + + /* Get the TX virtio ring. This is a "guest side vring". */ + err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names); + if (err) + goto err; + + /* Get the CAIF configuration from virtio config space, if available */ + if (vdev->config->get) { + virtio_cread(vdev, struct virtio_caif_transf_config, headroom, + &cfv->tx_hr); + virtio_cread(vdev, struct virtio_caif_transf_config, headroom, + &cfv->rx_hr); + virtio_cread(vdev, struct virtio_caif_transf_config, tailroom, + &cfv->tx_tr); + virtio_cread(vdev, struct virtio_caif_transf_config, tailroom, + &cfv->rx_tr); + virtio_cread(vdev, struct virtio_caif_transf_config, mtu, + &cfv->mtu); + virtio_cread(vdev, struct virtio_caif_transf_config, mtu, + &cfv->mru); + } else { + cfv->tx_hr = CFV_DEF_HEADROOM; + cfv->rx_hr = CFV_DEF_HEADROOM; + cfv->tx_tr = CFV_DEF_TAILROOM; + cfv->rx_tr = CFV_DEF_TAILROOM; + cfv->mtu = CFV_DEF_MTU_SIZE; + cfv->mru = CFV_DEF_MTU_SIZE; + } + + netdev->needed_headroom = cfv->tx_hr; + netdev->needed_tailroom = cfv->tx_tr; + + /* Disable buffer release interrupts unless we have stopped TX queues */ + virtqueue_disable_cb(cfv->vq_tx); + + netdev->mtu = cfv->mtu - cfv->tx_tr; + vdev->priv = cfv; + + /* Initialize NAPI poll context data */ + vringh_kiov_init(&cfv->ctx.riov, NULL, 0); + cfv->ctx.head = USHRT_MAX; + netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA); + + tasklet_init(&cfv->tx_release_tasklet, + cfv_tx_release_tasklet, + (unsigned long)cfv); + + /* Carrier is off until netdevice is opened */ + netif_carrier_off(netdev); + + /* register Netdev */ + err = register_netdev(netdev); + if (err) { + dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); + goto err; + } + + debugfs_init(cfv); + + return 0; +err: + netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err); + + if (cfv->vr_rx) + vdev->vringh_config->del_vrhs(cfv->vdev); + if (cfv->vdev) + vdev->config->del_vqs(cfv->vdev); + free_netdev(netdev); + return err; +} + +static void cfv_remove(struct virtio_device *vdev) +{ + struct cfv_info *cfv = vdev->priv; + + rtnl_lock(); + dev_close(cfv->ndev); + rtnl_unlock(); + + tasklet_kill(&cfv->tx_release_tasklet); + debugfs_remove_recursive(cfv->debugfs); + + vringh_kiov_cleanup(&cfv->ctx.riov); + vdev->config->reset(vdev); + vdev->vringh_config->del_vrhs(cfv->vdev); + cfv->vr_rx = NULL; + vdev->config->del_vqs(cfv->vdev); + unregister_netdev(cfv->ndev); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { +}; + +static struct virtio_driver caif_virtio_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = cfv_probe, + .remove = cfv_remove, +}; + +module_virtio_driver(caif_virtio_driver); +MODULE_DEVICE_TABLE(virtio, id_table); |
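
A note on the TX buffer pool added in caif_virtio.c: cfv_create_genpool() reserves one large dma_alloc_coherent() region and then hands out per-frame bounce buffers from it through the genalloc API, because coherent DMA allocations are page-granular while CAIF frames are small. The sketch below condenses that pattern; it is not driver code, the function name my_dev_setup_pool() and its parameters are illustrative, and error handling is reduced to the minimum.

#include <linux/genalloc.h>
#include <linux/dma-mapping.h>

/* Illustrative only: carve small TX bounce buffers out of a single
 * coherent DMA region, in the style of cfv_create_genpool().
 */
static struct gen_pool *my_dev_setup_pool(struct device *dev, size_t total,
					  void **vaddr, dma_addr_t *daddr)
{
	struct gen_pool *pool;

	*vaddr = dma_alloc_coherent(dev, total, daddr, GFP_KERNEL);
	if (!*vaddr)
		return NULL;

	/* Allocate on 128-byte (1 << 7) boundaries, any NUMA node. */
	pool = gen_pool_create(7, -1);
	if (!pool)
		goto free_dma;

	if (gen_pool_add_virt(pool, (unsigned long)*vaddr,
			      (phys_addr_t)virt_to_phys(*vaddr), total, -1))
		goto destroy_pool;

	return pool;

destroy_pool:
	gen_pool_destroy(pool);
free_dma:
	dma_free_coherent(dev, total, *vaddr, *daddr);
	return NULL;
}

/* Per-frame buffers then come from the pool:
 *	buf = (void *)gen_pool_alloc(pool, frame_len);
 *	...
 *	gen_pool_free(pool, (unsigned long)buf, frame_len);
 */

cfv_destroy_genpool() undoes the same steps: the emergency reserve is returned, the pool is destroyed and the coherent region is released.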
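
The TX flow control in cfv_netdev_tx() and cfv_release_used_buf() is spread across several branches, so the behaviour is easier to see in isolation: transmission is flowed off when the ring runs low on free descriptors, and flowed back on only once more than watermark_tx descriptors have been recycled and the memory reserve could be re-established. The stand-alone model below is not driver code; it is plain C99, the names ring_state, model_tx() and model_tx_complete() are hypothetical, and it deliberately omits the out-of-memory path (release the reserve, retry the allocation) and the case where re-establishing the reserve fails and watermark_tx is raised by a quarter of the ring.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64	/* stands in for virtqueue_get_vring_size(vq_tx) */
#define NUM_CPUS  4	/* stands in for num_present_cpus() */

struct ring_state {
	int  num_free;		/* free TX descriptors (vq_tx->num_free)  */
	int  watermark_tx;	/* reopen the queue above this fill level */
	bool reserve_held;	/* models cfv->reserved_mem != 0          */
	bool queue_stopped;	/* models netif_tx_stop_all_queues()      */
};

/* Transmit path: consume a descriptor, flow off when the ring runs low.
 * Like the driver, the frame that trips the threshold is still queued.
 */
static bool model_tx(struct ring_state *r)
{
	if (r->num_free <= NUM_CPUS) {
		/* Flow on again once a quarter of the ring is free. */
		r->watermark_tx = RING_SIZE / 4;
		r->queue_stopped = true;
	}
	if (r->num_free == 0)
		return false;		/* ring completely full */
	r->num_free--;
	return true;
}

/* Completion path: recycle a descriptor, reopen the queue if possible. */
static void model_tx_complete(struct ring_state *r)
{
	r->num_free++;
	if (r->num_free <= r->watermark_tx)
		return;			/* not enough slots freed yet */

	r->reserve_held = true;		/* re-establish the memory reserve */
	r->watermark_tx = RING_SIZE;	/* back to normal operation        */
	r->queue_stopped = false;	/* netif_tx_wake_all_queues()      */
}

int main(void)
{
	struct ring_state r = {
		.num_free = RING_SIZE, .watermark_tx = RING_SIZE,
		.reserve_held = true, .queue_stopped = false,
	};
	int sent = 0, stalls = 0;

	for (int i = 0; i < 1000; i++) {
		if (!r.queue_stopped && model_tx(&r))
			sent++;
		else if (r.queue_stopped)
			stalls++;
		/* The host recycles one descriptor every third step. */
		if (i % 3 == 0 && r.num_free < RING_SIZE)
			model_tx_complete(&r);
	}
	printf("sent %d frames, stalled %d steps, %d slots free\n",
	       sent, stalls, r.num_free);
	return 0;
}

Running the model shows the intended hysteresis: the queue oscillates between open and stopped instead of thrashing on every freed descriptor, which is the point of keeping watermark_tx well above the flow-off threshold.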
