Diffstat (limited to 'drivers/net/ethernet/marvell/mv643xx_eth.c')
 -rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 628
 1 file changed, 489 insertions(+), 139 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2ad1494efbb..b151a949f35 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -33,8 +33,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -43,6 +42,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <net/tso.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/etherdevice.h>
@@ -60,6 +60,10 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.4";
@@ -115,6 +119,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define  LINK_UP                        0x00000002
 #define TXQ_COMMAND                     0x0048
 #define TXQ_FIX_PRIO_CONF               0x004c
+#define PORT_SERIAL_CONTROL1            0x004c
+#define  CLK125_BYPASS_EN               0x00000010
 #define TX_BW_RATE                      0x0050
 #define TX_BW_MTU                       0x0058
 #define TX_BW_BURST                     0x005c
@@ -174,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
  * Misc definitions.
  */
 #define DEFAULT_RX_QUEUE_SIZE   128
-#define DEFAULT_TX_QUEUE_SIZE   256
+#define DEFAULT_TX_QUEUE_SIZE   512
 #define SKB_DMA_REALIGN         ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
+#define TSO_HEADER_SIZE         128
+
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+        ((addr >= txq->tso_hdrs_dma) && \
+         (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
 /*
  * RX/TX descriptors.
  */
@@ -245,6 +259,7 @@ struct tx_desc {
 #define GEN_TCP_UDP_CHECKSUM            0x00020000
 #define UDP_FRAME                       0x00010000
 #define MAC_HDR_EXTRA_4_BYTES           0x00008000
+#define GEN_TCP_UDP_CHK_FULL            0x00000400
 #define MAC_HDR_EXTRA_8_BYTES           0x00000200
 
 #define TX_IHL_SHIFT                    11
@@ -340,6 +355,12 @@ struct tx_queue {
        int tx_curr_desc;
        int tx_used_desc;
 
+       int tx_stop_threshold;
+       int tx_wake_threshold;
+
+       char *tso_hdrs;
+       dma_addr_t tso_hdrs_dma;
+
        struct tx_desc *tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
@@ -486,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
        if (netif_tx_queue_stopped(nq)) {
                __netif_tx_lock(nq, smp_processor_id());
-               if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+               if (txq->tx_desc_count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
                __netif_tx_unlock(nq);
        }
@@ -615,7 +636,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 
                rx_desc = rxq->rx_desc_area + rx;
 
-               size = skb->end - skb->data;
+               size = skb_end_pointer(skb) - skb->data;
                rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
                                                  skb->data, size,
                                                  DMA_FROM_DEVICE);
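Note: the tx_stop_threshold/tx_wake_threshold pair introduced above replaces the old per-packet MAX_SKB_FRAGS check with a hysteresis. A minimal standalone C model of the two thresholds, assuming MAX_SKB_FRAGS == 17 (its usual value with 4 KiB pages); not driver code:

#include <stdio.h>

#define MAX_SKB_FRAGS          17
#define MV643XX_MAX_TSO_SEGS   100
#define MV643XX_MAX_SKB_DESCS  (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

int main(void)
{
        int ring_size = 512;                            /* DEFAULT_TX_QUEUE_SIZE */
        int stop = ring_size - MV643XX_MAX_SKB_DESCS;   /* tx_stop_threshold */
        int wake = stop / 2;                            /* tx_wake_threshold */

        /* Queue stops once a worst-case skb might no longer fit, and only
         * wakes after the ring drains well below that point, so it does not
         * bounce on every reclaimed descriptor. */
        printf("stop when tx_desc_count >= %d\n", stop);
        printf("wake when tx_desc_count <= %d\n", wake);
        return 0;
}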
@@ -656,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
        return 0;
 }
 
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+       return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+                      u16 *l4i_chk, u32 *command, int length)
+{
+       int ret;
+       u32 cmd = 0;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int hdr_len;
+               int tag_bytes;
+
+               BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+                      skb->protocol != htons(ETH_P_8021Q));
+
+               hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+               tag_bytes = hdr_len - ETH_HLEN;
+
+               if (length - hdr_len > mp->shared->tx_csum_limit ||
+                   unlikely(tag_bytes & ~12)) {
+                       ret = skb_checksum_help(skb);
+                       if (!ret)
+                               goto no_csum;
+                       return ret;
+               }
+
+               if (tag_bytes & 4)
+                       cmd |= MAC_HDR_EXTRA_4_BYTES;
+               if (tag_bytes & 8)
+                       cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+               cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+                      GEN_IP_V4_CHECKSUM   |
+                      ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+               /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+                * it seems we don't need to pass the initial checksum.
+                */
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       cmd |= UDP_FRAME;
+                       *l4i_chk = 0;
+                       break;
+               case IPPROTO_TCP:
+                       *l4i_chk = 0;
+                       break;
+               default:
+                       WARN(1, "protocol not supported");
+               }
+       } else {
+no_csum:
+               /* Errata BTS #50, IHL must be 5 if no HW checksum */
+               cmd |= 5 << TX_IHL_SHIFT;
+       }
+       *command = cmd;
+       return 0;
+}
+
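Note: the tag_bytes handling in skb_tx_csum() above only describes headers that are 0, 4, 8 or 12 bytes longer than a plain Ethernet header; anything else falls back to software checksumming. A standalone C model of that bit logic (macro values mirror the driver's descriptor command bits; the rest is illustrative scaffolding):

#include <stdint.h>
#include <stdio.h>

#define MAC_HDR_EXTRA_4_BYTES  0x00008000
#define MAC_HDR_EXTRA_8_BYTES  0x00000200

/* Returns -1 for header layouts the hardware cannot describe
 * (anything other than 0, 4, 8 or 12 tag bytes). */
static int tag_bytes_to_cmd(int tag_bytes, uint32_t *cmd)
{
        if (tag_bytes & ~12)
                return -1;      /* driver falls back to skb_checksum_help() */
        if (tag_bytes & 4)
                *cmd |= MAC_HDR_EXTRA_4_BYTES;
        if (tag_bytes & 8)
                *cmd |= MAC_HDR_EXTRA_8_BYTES;
        return 0;
}

int main(void)
{
        int sizes[] = { 0, 4, 8, 12, 6 };
        for (int i = 0; i < 5; i++) {
                uint32_t cmd = 0;
                int bad = tag_bytes_to_cmd(sizes[i], &cmd);
                printf("tag_bytes=%2d -> %s cmd=0x%08x\n",
                       sizes[i], bad ? "reject" : "accept", cmd);
        }
        return 0;
}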
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+                struct sk_buff *skb, char *data, int length,
+                bool last_tcp, bool is_last)
+{
+       int tx_index;
+       u32 cmd_sts;
+       struct tx_desc *desc;
+
+       tx_index = txq->tx_curr_desc++;
+       if (txq->tx_curr_desc == txq->tx_ring_size)
+               txq->tx_curr_desc = 0;
+       desc = &txq->tx_desc_area[tx_index];
+
+       desc->l4i_chk = 0;
+       desc->byte_cnt = length;
+       desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+                                      length, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
+               WARN(1, "dma_map_single failed!\n");
+               return -ENOMEM;
+       }
+
+       cmd_sts = BUFFER_OWNED_BY_DMA;
+       if (last_tcp) {
+               /* last descriptor in the TCP packet */
+               cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+               /* last descriptor in SKB */
+               if (is_last)
+                       cmd_sts |= TX_ENABLE_INTERRUPT;
+       }
+       desc->cmd_sts = cmd_sts;
+       return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+{
+       struct mv643xx_eth_private *mp = txq_to_mp(txq);
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int tx_index;
+       struct tx_desc *desc;
+       int ret;
+       u32 cmd_csum = 0;
+       u16 l4i_chk = 0;
+
+       tx_index = txq->tx_curr_desc;
+       desc = &txq->tx_desc_area[tx_index];
+
+       ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+       if (ret)
+               WARN(1, "failed to prepare checksum!");
+
+       /* Should we set this? Can't use the value from skb_tx_csum()
+        * as it's not the correct initial L4 checksum to use.
+        */
+       desc->l4i_chk = 0;
+
+       desc->byte_cnt = hdr_len;
+       desc->buf_ptr = txq->tso_hdrs_dma +
+                       txq->tx_curr_desc * TSO_HEADER_SIZE;
+       desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+                       GEN_CRC;
+
+       txq->tx_curr_desc++;
+       if (txq->tx_curr_desc == txq->tx_ring_size)
+               txq->tx_curr_desc = 0;
+}
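Note: txq_put_hdr_tso() above points each header descriptor at a fixed 128-byte slot of one coherent allocation, indexed by descriptor number, which is what later lets IS_TSO_HEADER() recognize such buffers by address range alone. A toy C model of that addressing, with plain malloc and a made-up bus address standing in for dma_alloc_coherent():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TSO_HEADER_SIZE 128

int main(void)
{
        unsigned int ring_size = 512;
        char *tso_hdrs = malloc(ring_size * TSO_HEADER_SIZE);
        uintptr_t tso_hdrs_dma = 0x1f000000;    /* pretend bus address */

        unsigned int i = 137;                   /* some descriptor index */
        char *cpu_slot = tso_hdrs + i * TSO_HEADER_SIZE;
        uintptr_t dma_slot = tso_hdrs_dma + i * TSO_HEADER_SIZE;

        /* The reclaim-side test from IS_TSO_HEADER(): */
        int is_hdr = dma_slot >= tso_hdrs_dma &&
                     dma_slot < tso_hdrs_dma + ring_size * TSO_HEADER_SIZE;

        printf("slot %u: cpu=%p dma=0x%lx is_tso_header=%d\n",
               i, (void *)cpu_slot, (unsigned long)dma_slot, is_hdr);
        free(tso_hdrs);
        return 0;
}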
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+                         struct net_device *dev)
+{
+       struct mv643xx_eth_private *mp = txq_to_mp(txq);
+       int total_len, data_left, ret;
+       int desc_count = 0;
+       struct tso_t tso;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       /* Count needed descriptors */
+       if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+               netdev_dbg(dev, "not enough descriptors for TSO!\n");
+               return -EBUSY;
+       }
+
+       /* Initialize the TSO handler, and prepare the first payload */
+       tso_start(skb, &tso);
+
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+
+               /* prepare packet headers: MAC + IP + TCP */
+               hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+               txq_put_hdr_tso(skb, txq, data_left);
+
+               while (data_left > 0) {
+                       int size;
+                       desc_count++;
+
+                       size = min_t(int, tso.size, data_left);
+                       ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+                                              size == data_left,
+                                              total_len == 0);
+                       if (ret)
+                               goto err_release;
+                       data_left -= size;
+                       tso_build_data(skb, &tso, size);
+               }
+       }
+
+       __skb_queue_tail(&txq->tx_skb, skb);
+       skb_tx_timestamp(skb);
+
+       /* clear TX_END status */
+       mp->work_tx_end &= ~(1 << txq->index);
+
+       /* ensure all descriptors are written before poking hardware */
+       wmb();
+       txq_enable(txq);
+       txq->tx_desc_count += desc_count;
+       return 0;
+err_release:
+       /* TODO: Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+       return ret;
+}
+
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -666,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;
+               void *addr;
 
                this_frag = &skb_shinfo(skb)->frags[frag];
+               addr = page_address(this_frag->page.p) + this_frag->page_offset;
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
@@ -687,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
-               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
-                                                this_frag, 0,
-                                                skb_frag_size(this_frag),
-                                                DMA_TO_DEVICE);
+               desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
+                                              desc->byte_cnt, DMA_TO_DEVICE);
        }
 }
 
-static inline __be16 sum16_as_be(__sum16 sum)
-{
-       return (__force __be16)sum;
-}
-
-static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+                         struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -707,54 +916,22 @@
        struct tx_desc *desc;
        u32 cmd_sts;
        u16 l4i_chk;
-       int length;
+       int length, ret;
 
-       cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+       cmd_sts = 0;
        l4i_chk = 0;
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               int hdr_len;
-               int tag_bytes;
-
-               BUG_ON(skb->protocol != htons(ETH_P_IP) &&
-                      skb->protocol != htons(ETH_P_8021Q));
-
-               hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-               tag_bytes = hdr_len - ETH_HLEN;
-               if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
-                   unlikely(tag_bytes & ~12)) {
-                       if (skb_checksum_help(skb) == 0)
-                               goto no_csum;
-                       kfree_skb(skb);
-                       return 1;
-               }
-
-               if (tag_bytes & 4)
-                       cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-               if (tag_bytes & 8)
-                       cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-
-               cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-                          GEN_IP_V4_CHECKSUM   |
-                          ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
-               switch (ip_hdr(skb)->protocol) {
-               case IPPROTO_UDP:
-                       cmd_sts |= UDP_FRAME;
-                       l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
-                       break;
-               case IPPROTO_TCP:
-                       l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
-                       break;
-               default:
-                       BUG();
-               }
-       } else {
-no_csum:
-               /* Errata BTS #50, IHL must be 5 if no HW checksum */
-               cmd_sts |= 5 << TX_IHL_SHIFT;
+       if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+               if (net_ratelimit())
+                       netdev_err(dev, "tx queue full?!\n");
+               return -EBUSY;
        }
 
+       ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+       if (ret)
+               return ret;
+       cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+
        tx_index = txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
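Note: txq_submit_tso() above consumes one header descriptor per TCP segment plus one data descriptor per contiguous payload piece handed out by the tso_t iterator. A userspace model of that accounting, with the net/tso.h fragment walk simplified to fixed-size source pieces (the frag_size parameter is illustrative, not a driver value):

#include <stdio.h>

static int count_descs(int payload_len, int gso_size, int frag_size)
{
        int descs = 0;
        while (payload_len > 0) {
                int data_left = payload_len < gso_size ? payload_len : gso_size;
                payload_len -= data_left;
                descs++;                        /* header descriptor */
                while (data_left > 0) {
                        int size = data_left < frag_size ? data_left : frag_size;
                        data_left -= size;
                        descs++;                /* payload descriptor */
                }
        }
        return descs;
}

int main(void)
{
        /* 64 KiB of TCP payload, 1448-byte segments, 4 KiB source pieces */
        printf("descriptors needed: %d\n", count_descs(65536, 1448, 4096));
        return 0;
}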
@@ -796,7 +973,7 @@ no_csum:
 static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
-       int length, queue;
+       int length, queue, ret;
        struct tx_queue *txq;
        struct netdev_queue *nq;
 
@@ -805,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        nq = netdev_get_tx_queue(dev, queue);
 
        if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
-               txq->tx_dropped++;
                netdev_printk(KERN_DEBUG, dev,
                              "failed to linearize skb with tiny unaligned fragment\n");
                return NETDEV_TX_BUSY;
        }
 
-       if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
-               if (net_ratelimit())
-                       netdev_err(dev, "tx queue full?!\n");
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
        length = skb->len;
 
-       if (!txq_submit_skb(txq, skb)) {
-               int entries_left;
-
+       if (skb_is_gso(skb))
+               ret = txq_submit_tso(txq, skb, dev);
+       else
+               ret = txq_submit_skb(txq, skb, dev);
+       if (!ret) {
                txq->tx_bytes += length;
                txq->tx_packets++;
 
-               entries_left = txq->tx_ring_size - txq->tx_desc_count;
-               if (entries_left < MAX_SKB_FRAGS + 1)
+               if (txq->tx_desc_count >= txq->tx_stop_threshold)
                        netif_tx_stop_queue(nq);
+       } else {
+               txq->tx_dropped++;
+               dev_kfree_skb_any(skb);
        }
 
        return NETDEV_TX_OK;
@@ -902,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                        mp->dev->stats.tx_errors++;
                }
 
-               if (cmd_sts & TX_FIRST_DESC) {
+               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
                        dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
                                         desc->byte_cnt, DMA_TO_DEVICE);
-               } else {
-                       dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
-                                      desc->byte_cnt, DMA_TO_DEVICE);
-               }
-
                dev_kfree_skb(skb);
        }
 
@@ -1005,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 
 
 /* mii management interface *************************************************/
-static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+static void mv643xx_eth_adjust_link(struct net_device *dev)
 {
+       struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
        u32 autoneg_disable = FORCE_LINK_PASS |
                              DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1125,15 +1294,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
        p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
        spin_unlock_bh(&mp->mib_counters_lock);
-
-       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
        struct mv643xx_eth_private *mp = (void *)_mp;
-
        mib_counters_update(mp);
+       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
@@ -1384,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        ret = phy_ethtool_sset(mp->phy, cmd);
        if (!ret)
-               mv643xx_adjust_pscr(mp);
+               mv643xx_eth_adjust_link(dev);
        return ret;
 }
 
@@ -1453,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
                return -EINVAL;
 
        mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
-       mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+       mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+                                  MV643XX_MAX_SKB_DESCS * 2, 4096);
+       if (mp->tx_ring_size != er->tx_pending)
+               netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+                           mp->tx_ring_size, er->tx_pending);
 
        if (netif_running(dev)) {
                mv643xx_eth_stop(dev);
@@ -1757,7 +1928,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
        memset(rxq->rx_desc_area, 0, size);
 
        rxq->rx_desc_area_size = size;
-       rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+       rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
                                    GFP_KERNEL);
        if (rxq->rx_skb == NULL)
                goto out_free;
@@ -1829,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
        txq->tx_ring_size = mp->tx_ring_size;
 
+       /* A queue must always have room for at least one skb.
+        * Therefore, stop the queue when the free entries reaches
+        * the maximum number of descriptors per skb.
+        */
+       txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+       txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
        txq->tx_desc_count = 0;
        txq->tx_curr_desc = 0;
        txq->tx_used_desc = 0;
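Note: both the ethtool path above and set_params() further down force requested TX ring sizes into [MV643XX_MAX_SKB_DESCS * 2, 4096]. A quick standalone demonstration of those clamp_t() semantics, assuming MAX_SKB_FRAGS == 17:

#include <stdio.h>

#define MAX_SKB_FRAGS          17
#define MV643XX_MAX_TSO_SEGS   100
#define MV643XX_MAX_SKB_DESCS  (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        unsigned int requests[] = { 64, 512, 8192 };
        for (int i = 0; i < 3; i++) {
                unsigned int granted = clamp_uint(requests[i],
                                                  MV643XX_MAX_SKB_DESCS * 2,
                                                  4096);
                printf("requested %4u -> granted %4u%s\n", requests[i], granted,
                       granted != requests[i] ? "  (would warn)" : "");
        }
        return 0;
}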
@@ -1868,6 +2046,15 @@
                                        nexti * sizeof(struct tx_desc);
        }
 
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+       txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+                                          txq->tx_ring_size * TSO_HEADER_SIZE,
+                                          &txq->tso_hdrs_dma, GFP_KERNEL);
+       if (txq->tso_hdrs == NULL) {
+               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+                                 txq->tx_desc_area, txq->tx_desc_dma);
+               return -ENOMEM;
+       }
        skb_queue_head_init(&txq->tx_skb);
 
        return 0;
@@ -1888,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
+       if (txq->tso_hdrs)
+               dma_free_coherent(mp->dev->dev.parent,
+                                 txq->tx_ring_size * TSO_HEADER_SIZE,
+                                 txq->tso_hdrs, txq->tso_hdrs_dma);
 }
 
@@ -2063,23 +2254,6 @@ static inline void oom_timer_wrapper(unsigned long data)
        napi_schedule(&mp->napi);
 }
 
-static void phy_reset(struct mv643xx_eth_private *mp)
-{
-       int data;
-
-       data = phy_read(mp->phy, MII_BMCR);
-       if (data < 0)
-               return;
-
-       data |= BMCR_RESET;
-       if (phy_write(mp->phy, MII_BMCR, data) < 0)
-               return;
-
-       do {
-               data = phy_read(mp->phy, MII_BMCR);
-       } while (data >= 0 && data & BMCR_RESET);
-}
-
 static void port_start(struct mv643xx_eth_private *mp)
 {
        u32 pscr;
@@ -2092,8 +2266,9 @@ static void port_start(struct mv643xx_eth_private *mp)
                struct ethtool_cmd cmd;
 
                mv643xx_eth_get_settings(mp->dev, &cmd);
-               phy_reset(mp);
+               phy_init_hw(mp->phy);
                mv643xx_eth_set_settings(mp->dev, &cmd);
+               phy_start(mp->phy);
        }
 
        /*
@@ -2231,6 +2406,7 @@ static int mv643xx_eth_open(struct net_device *dev)
                mp->int_mask |= INT_TX_END_0 << i;
        }
 
+       add_timer(&mp->mib_counters_timer);
        port_start(mp);
 
        wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2288,7 +2464,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
        del_timer_sync(&mp->rx_oom);
 
        netif_carrier_off(dev);
-
+       if (mp->phy)
+               phy_stop(mp->phy);
        free_irq(dev->irq, dev);
 
        port_reset(mp);
@@ -2314,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
        ret = phy_mii_ioctl(mp->phy, ifr, cmd);
        if (!ret)
-               mv643xx_adjust_pscr(mp);
+               mv643xx_eth_adjust_link(dev);
        return ret;
 }
 
@@ -2450,13 +2627,160 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
        }
 }
 
+#if defined(CONFIG_OF)
+static const struct of_device_id mv643xx_eth_shared_ids[] = {
+       { .compatible = "marvell,orion-eth", },
+       { .compatible = "marvell,kirkwood-eth", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
+#endif
+
+#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
+#define mv643xx_eth_property(_np, _name, _v)                            \
+       do {                                                             \
+               u32 tmp;                                                 \
+               if (!of_property_read_u32(_np, "marvell," _name, &tmp))  \
+                       _v = tmp;                                        \
+       } while (0)
+
+static struct platform_device *port_platdev[3];
+
+static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+                                         struct device_node *pnp)
+{
+       struct platform_device *ppdev;
+       struct mv643xx_eth_platform_data ppd;
+       struct resource res;
+       const char *mac_addr;
+       int ret;
+       int dev_num = 0;
+
+       memset(&ppd, 0, sizeof(ppd));
+       ppd.shared = pdev;
+
+       memset(&res, 0, sizeof(res));
+       if (!of_irq_to_resource(pnp, 0, &res)) {
+               dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
+               dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
+               return -EINVAL;
+       }
+
+       if (ppd.port_number >= 3) {
+               dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
+               return -EINVAL;
+       }
+
+       while (dev_num < 3 && port_platdev[dev_num])
+               dev_num++;
+
+       if (dev_num == 3) {
+               dev_err(&pdev->dev, "too many ports registered\n");
+               return -EINVAL;
+       }
+
+       mac_addr = of_get_mac_address(pnp);
+       if (mac_addr)
+               memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
+
+       mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
+       mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
+       mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
+       mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
+       mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
+       mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
+
+       ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
+       if (!ppd.phy_node) {
+               ppd.phy_addr = MV643XX_ETH_PHY_NONE;
+               of_property_read_u32(pnp, "speed", &ppd.speed);
+               of_property_read_u32(pnp, "duplex", &ppd.duplex);
+       }
+
+       ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
+       if (!ppdev)
+               return -ENOMEM;
+       ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ppdev->dev.of_node = pnp;
+
+       ret = platform_device_add_resources(ppdev, &res, 1);
+       if (ret)
+               goto port_err;
+
+       ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
+       if (ret)
+               goto port_err;
+
+       ret = platform_device_add(ppdev);
+       if (ret)
+               goto port_err;
+
+       port_platdev[dev_num] = ppdev;
+
+       return 0;
+
+port_err:
+       platform_device_put(ppdev);
+       return ret;
+}
+
+static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+       struct mv643xx_eth_shared_platform_data *pd;
+       struct device_node *pnp, *np = pdev->dev.of_node;
+       int ret;
+
+       /* bail out if not registered from DT */
+       if (!np)
+               return 0;
+
+       pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return -ENOMEM;
+       pdev->dev.platform_data = pd;
+
+       mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
+
+       for_each_available_child_of_node(np, pnp) {
+               ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+static void mv643xx_eth_shared_of_remove(void)
+{
+       int n;
+
+       for (n = 0; n < 3; n++) {
+               platform_device_del(port_platdev[n]);
+               port_platdev[n] = NULL;
+       }
+}
+#else
+static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static inline void mv643xx_eth_shared_of_remove(void)
+{
+}
+#endif
+
 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 {
        static int mv643xx_eth_version_printed;
-       struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+       struct mv643xx_eth_shared_platform_data *pd;
        struct mv643xx_eth_shared_private *msp;
        const struct mbus_dram_target_info *dram;
        struct resource *res;
+       int ret;
 
        if (!mv643xx_eth_version_printed++)
                pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
@@ -2469,8 +2793,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 
        msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
        if (msp == NULL)
                return -ENOMEM;
+       platform_set_drvdata(pdev, msp);
 
-       msp->base = ioremap(res->start, resource_size(res));
+       msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (msp->base == NULL)
                return -ENOMEM;
 
@@ -2485,12 +2810,15 @@
        if (dram)
                mv643xx_eth_conf_mbus_windows(msp, dram);
 
+       ret = mv643xx_eth_shared_of_probe(pdev);
+       if (ret)
+               return ret;
+       pd = dev_get_platdata(&pdev->dev);
+
        msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
                                        pd->tx_csum_limit : 9 * 1024;
        infer_hw_params(msp);
 
-       platform_set_drvdata(pdev, msp);
-
        return 0;
 }
 
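Note: mv643xx_eth_shared_of_add_port() above hands out at most three port slots by scanning port_platdev[] for the first free entry. A minimal C model of that allocation policy (names here are illustrative, not driver code):

#include <stdio.h>

#define MAX_PORTS 3
static const char *port_platdev[MAX_PORTS];

static int add_port(const char *name)
{
        int dev_num = 0;

        while (dev_num < MAX_PORTS && port_platdev[dev_num])
                dev_num++;
        if (dev_num == MAX_PORTS) {
                fprintf(stderr, "too many ports registered\n");
                return -1;
        }
        port_platdev[dev_num] = name;
        return dev_num;
}

int main(void)
{
        const char *ports[] = { "port0", "port1", "port2", "port3" };
        for (int i = 0; i < 4; i++)
                printf("%s -> slot %d\n", ports[i], add_port(ports[i]));
        return 0;
}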
@@ -2498,10 +2826,9 @@
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
 {
        struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
 
-       iounmap(msp->base);
+       mv643xx_eth_shared_of_remove();
        if (!IS_ERR(msp->clk))
                clk_disable_unprepare(msp->clk);
-
        return 0;
 }
 
@@ -2511,6 +2838,7 @@ static struct platform_driver mv643xx_eth_shared_driver = {
        .driver = {
                .name = MV643XX_ETH_SHARED_NAME,
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
        },
 };
 
@@ -2538,9 +2866,10 @@ static void set_params(struct mv643xx_eth_private *mp,
                       struct mv643xx_eth_platform_data *pd)
 {
        struct net_device *dev = mp->dev;
+       unsigned int tx_ring_size;
 
        if (is_valid_ether_addr(pd->mac_addr))
-               memcpy(dev->dev_addr, pd->mac_addr, 6);
+               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
        else
                uc_addr_get(mp, dev->dev_addr);
 
@@ -2552,22 +2881,22 @@
 
        mp->rxq_count = pd->rx_queue_count ? : 1;
 
-       mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+       tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
        if (pd->tx_queue_size)
-               mp->tx_ring_size = pd->tx_queue_size;
+               tx_ring_size = pd->tx_queue_size;
+
+       mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+                                  MV643XX_MAX_SKB_DESCS * 2, 4096);
+       if (mp->tx_ring_size != tx_ring_size)
+               netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+                           mp->tx_ring_size, tx_ring_size);
+
        mp->tx_desc_sram_addr = pd->tx_sram_addr;
        mp->tx_desc_sram_size = pd->tx_sram_size;
        mp->txq_count = pd->tx_queue_count ? : 1;
 }
 
-static void mv643xx_eth_adjust_link(struct net_device *dev)
-{
-       struct mv643xx_eth_private *mp = netdev_priv(dev);
-
-       mv643xx_adjust_pscr(mp);
-}
-
 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
                                   int phy_addr)
 {
@@ -2608,8 +2937,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
 {
        struct phy_device *phy = mp->phy;
 
-       phy_reset(mp);
-
        if (speed == 0) {
                phy->autoneg = AUTONEG_ENABLE;
                phy->speed = 0;
@@ -2677,7 +3004,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        struct resource *res;
        int err;
 
-       pd = pdev->dev.platform_data;
+       pd = dev_get_platdata(&pdev->dev);
        if (pd == NULL) {
                dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
                return -ENODEV;
@@ -2701,6 +3028,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        mp->dev = dev;
 
+       /* Kirkwood resets some registers on gated clocks. Especially
+        * CLK125_BYPASS_EN must be cleared but is not available on
+        * all other SoCs/System Controllers using this driver.
+        */
+       if (of_device_is_compatible(pdev->dev.of_node,
+                                   "marvell,kirkwood-eth-port"))
+               wrlp(mp, PORT_SERIAL_CONTROL1,
+                    rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
+
        /*
         * Start with a default rate, and if there is a clock, allow
         * it to override the default.
        */
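Note: the Kirkwood quirk just above is a plain read-modify-write of PORT_SERIAL_CONTROL1 that clears one bit and preserves the rest. A sketch of that pattern, with a plain variable standing in for the memory-mapped register behind rdlp()/wrlp():

#include <stdint.h>
#include <stdio.h>

#define CLK125_BYPASS_EN 0x00000010u

static uint32_t port_serial_control1 = 0x0000001fu; /* pretend reset value */

int main(void)
{
        uint32_t val = port_serial_control1;    /* rdlp() */
        val &= ~CLK125_BYPASS_EN;               /* clear only the bypass bit */
        port_serial_control1 = val;             /* wrlp() */
        printf("PSC1: 0x%08x\n", port_serial_control1);
        return 0;
}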
@@ -2710,25 +3046,39 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        if (!IS_ERR(mp->clk)) {
                clk_prepare_enable(mp->clk);
                mp->t_clk = clk_get_rate(mp->clk);
+       } else if (!IS_ERR(mp->shared->clk)) {
+               mp->t_clk = clk_get_rate(mp->shared->clk);
        }
 
        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);
 
-       if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
+       err = 0;
+       if (pd->phy_node) {
+               mp->phy = of_phy_connect(mp->dev, pd->phy_node,
+                                        mv643xx_eth_adjust_link, 0,
+                                        PHY_INTERFACE_MODE_GMII);
+               if (!mp->phy)
+                       err = -ENODEV;
+               else
+                       phy_addr_set(mp, mp->phy->addr);
+       } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
                mp->phy = phy_scan(mp, pd->phy_addr);
 
-               if (IS_ERR(mp->phy)) {
+               if (IS_ERR(mp->phy))
                        err = PTR_ERR(mp->phy);
-                       if (err == -ENODEV)
-                               err = -EPROBE_DEFER;
-                       goto out;
-               }
-               phy_init(mp, pd->speed, pd->duplex);
+               else
+                       phy_init(mp, pd->speed, pd->duplex);
+       }
+       if (err == -ENODEV) {
+               err = -EPROBE_DEFER;
+               goto out;
        }
+       if (err)
+               goto out;
 
-       SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+       dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
 
        init_pscr(mp, pd->speed, pd->duplex);
 
@@ -2739,7 +3089,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->mib_counters_timer.data = (unsigned long)mp;
        mp->mib_counters_timer.function = mib_counters_timer_wrapper;
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-       add_timer(&mp->mib_counters_timer);
 
        spin_lock_init(&mp->mib_counters_lock);
 
@@ -2761,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
 
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-       dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       dev->vlan_features = dev->features;
+
+       dev->features |= NETIF_F_RXCSUM;
+       dev->hw_features = dev->features;
 
        dev->priv_flags |= IFF_UNICAST_FLT;
+       dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -2805,7 +3157,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 
        unregister_netdev(mp->dev);
        if (mp->phy != NULL)
-               phy_detach(mp->phy);
+               phy_disconnect(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);
 
        if (!IS_ERR(mp->clk))
@@ -2813,8 +3165,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 
        free_netdev(mp->dev);
 
-       platform_set_drvdata(pdev, NULL);
-
       return 0;
 }
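Note: the gso_max_segs setting above caps a single skb at MV643XX_MAX_TSO_SEGS segments, each needing one header descriptor plus data descriptors, so the worst case per skb is MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS and the minimum TX ring enforced earlier is twice that. A standalone sanity check of the arithmetic, assuming MAX_SKB_FRAGS == 17:

#include <stdio.h>

#define MAX_SKB_FRAGS          17
#define MV643XX_MAX_TSO_SEGS   100
#define MV643XX_MAX_SKB_DESCS  (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

int main(void)
{
        printf("worst-case descriptors per skb: %d\n", MV643XX_MAX_SKB_DESCS);
        printf("minimum TX ring size:           %d\n", MV643XX_MAX_SKB_DESCS * 2);
        printf("default TX ring size:           %d\n", 512);
        return 0;
}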
