Diffstat (limited to 'drivers/net')
53 files changed, 1650 insertions, 597 deletions
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b00..93580a47cc5 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                      bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
         bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-        alb_send_learning_packets(bond->curr_active_slave, addr);
+        alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                            u16 vid)
+                            __be16 vlan_proto, u16 vid)
 {
         struct learning_pkt pkt;
         struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
         skb->dev = slave->dev;
 
         if (vid) {
-                skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+                skb = vlan_put_tag(skb, vlan_proto, vid);
                 if (!skb) {
                         pr_err("%s: Error: failed to insert VLAN tag\n",
                                slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
         dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                      bool strict_match)
 {
         struct bonding *bond = bond_get_bond_by_slave(slave);
         struct net_device *upper;
         struct list_head *iter;
 
         /* send untagged */
-        alb_send_lp_vid(slave, mac_addr, 0);
+        alb_send_lp_vid(slave, mac_addr, 0, 0);
 
         /* loop through vlans and send one packet for each */
         rcu_read_lock();
         netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                if (upper->priv_flags & IFF_802_1Q_VLAN)
-                        alb_send_lp_vid(slave, mac_addr,
-                                        vlan_dev_vlan_id(upper));
+                if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                        if (strict_match &&
+                            ether_addr_equal_64bits(mac_addr,
+                                                    upper->dev_addr)) {
+                                alb_send_lp_vid(slave, mac_addr,
+                                                vlan_dev_vlan_proto(upper),
+                                                vlan_dev_vlan_id(upper));
+                        } else if (!strict_match) {
+                                alb_send_lp_vid(slave, upper->dev_addr,
+                                                vlan_dev_vlan_proto(upper),
+                                                vlan_dev_vlan_id(upper));
+                        }
+                }
         }
         rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
         /* fasten the change in the switch */
         if (SLAVE_IS_OK(slave1)) {
-                alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+                alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                 if (bond->alb_info.rlb_enabled) {
                         /* inform the clients that the mac address
                          * has changed
@@ -1119,7 +1130,7 @@
         }
 
         if (SLAVE_IS_OK(slave2)) {
-                alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+                alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                 if (bond->alb_info.rlb_enabled) {
                         /* inform the clients that the mac address
                          * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
         /* send learning packets */
         if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+                bool strict_match;
+
                 /* change of curr_active_slave involves swapping of mac addresses.
                  * in order to avoid this swapping from happening while
                  * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@
                  */
                 read_lock(&bond->curr_slave_lock);
 
-                bond_for_each_slave_rcu(bond, slave, iter)
-                        alb_send_learning_packets(slave, slave->dev->dev_addr);
+                bond_for_each_slave_rcu(bond, slave, iter) {
+                        /* If updating current_active, use all currently
+                         * user mac addreses (!strict_match).  Otherwise, only
+                         * use mac of the slave device.
+                         */
+                        strict_match = (slave != bond->curr_active_slave);
+                        alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                  strict_match);
+                }
 
                 read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
         } else {
                 /* set the new_slave to the bond mac address */
                 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-                alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+                alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                          false);
         }
 
         write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
         alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
         read_lock(&bond->lock);
-        alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+        alb_send_learning_packets(bond->curr_active_slave,
+                                  bond_dev->dev_addr, false);
         if (bond->alb_info.rlb_enabled) {
                 /* inform clients mac address has changed */
                 rlb_req_update_slave_clients(bond, bond->curr_active_slave);
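The bond_alb.c hunks above change which source MAC an ALB learning packet advertises for each first-level VLAN, depending on the new strict_match flag. Below is a minimal user-space sketch of that selection logic under stated assumptions: struct vlan_upper and pick_learning_src() are invented for the example and are not kernel APIs.

/* Simplified model of the strict_match selection added to
 * alb_send_learning_packets(); illustrative names only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct vlan_upper {
        unsigned char dev_addr[6];
        unsigned short vlan_id;
        int encap_level;        /* 0 = directly on top of the bond */
};

/* Returns the MAC to advertise for this VLAN, or NULL to skip it. */
static const unsigned char *pick_learning_src(const struct vlan_upper *u,
                                              const unsigned char *slave_mac,
                                              bool strict_match)
{
        if (u->encap_level != 0)        /* only first-level VLANs are used */
                return NULL;
        if (strict_match)               /* periodic monitor: advertise only
                                         * VLANs that share the slave's MAC */
                return memcmp(slave_mac, u->dev_addr, 6) ? NULL : slave_mac;
        /* failover or MAC change: advertise every VLAN with its own MAC */
        return u->dev_addr;
}

int main(void)
{
        const unsigned char slave_mac[6] = { 0, 1, 2, 3, 4, 5 };
        struct vlan_upper v = { { 0, 1, 2, 3, 4, 9 }, 100, 0 };
        const unsigned char *src;

        src = pick_learning_src(&v, slave_mac, true);
        printf("strict: %s\n", src ? "send" : "skip");
        src = pick_learning_src(&v, slave_mac, false);
        printf("loose:  %s\n", src ? "send" : "skip");
        return 0;
}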
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c895..d3a67896d43 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                           __be32 dest_ip, __be32 src_ip,
-                          struct bond_vlan_tag *inner,
-                          struct bond_vlan_tag *outer)
+                          struct bond_vlan_tag *tags)
 {
         struct sk_buff *skb;
+        int i;
 
         pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                  arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                 net_err_ratelimited("ARP packet allocation failed\n");
                 return;
         }
-        if (outer->vlan_id) {
-                if (inner->vlan_id) {
-                        pr_debug("inner tag: proto %X vid %X\n",
-                                 ntohs(inner->vlan_proto), inner->vlan_id);
-                        skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                             inner->vlan_id);
-                        if (!skb) {
-                                net_err_ratelimited("failed to insert inner VLAN tag\n");
-                                return;
-                        }
-                }
-                pr_debug("outer reg: proto %X vid %X\n",
-                         ntohs(outer->vlan_proto), outer->vlan_id);
-                skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+        /* Go through all the tags backwards and add them to the packet */
+        for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+                if (!tags[i].vlan_id)
+                        continue;
+
+                pr_debug("inner tag: proto %X vid %X\n",
+                         ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+                skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                     tags[i].vlan_id);
+                if (!skb) {
+                        net_err_ratelimited("failed to insert inner VLAN tag\n");
+                        return;
+                }
+        }
+        /* Set the outer tag */
+        if (tags[0].vlan_id) {
+                pr_debug("outer tag: proto %X vid %X\n",
+                         ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+                skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                 if (!skb) {
                         net_err_ratelimited("failed to insert outer VLAN tag\n");
                         return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
         arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+                                    struct net_device *end_dev,
+                                    struct bond_vlan_tag *tags)
+{
+        struct net_device *upper;
+        struct list_head *iter;
+        int idx;
+
+        if (start_dev == end_dev)
+                return true;
+
+        netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+                if (bond_verify_device_path(upper, end_dev, tags)) {
+                        if (is_vlan_dev(upper)) {
+                                idx = vlan_get_encap_level(upper);
+                                if (idx >= BOND_MAX_VLAN_ENCAP)
+                                        return false;
+
+                                tags[idx].vlan_proto =
+                                    vlan_dev_vlan_proto(upper);
+                                tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                        }
+                        return true;
+                }
+        }
+
+        return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-        struct net_device *upper, *vlan_upper;
-        struct list_head *iter, *vlan_iter;
         struct rtable *rt;
-        struct bond_vlan_tag inner, outer;
+        struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
         __be32 *targets = bond->params.arp_targets, addr;
         int i;
+        bool ret;
 
         for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                 pr_debug("basa: target %pI4\n", &targets[i]);
-                inner.vlan_proto = 0;
-                inner.vlan_id = 0;
-                outer.vlan_proto = 0;
-                outer.vlan_id = 0;
+                memset(tags, 0, sizeof(tags));
 
                 /* Find out through which dev should the packet go */
                 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                                 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                      bond->dev->name, &targets[i]);
-                        bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                        bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                      0, tags);
                         continue;
                 }
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                         goto found;
 
                 rcu_read_lock();
-                /* first we search only for vlan devices. for every vlan
-                 * found we verify its upper dev list, searching for the
-                 * rt->dst.dev. If found we save the tag of the vlan and
-                 * proceed to send the packet.
-                 */
-                netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                  vlan_iter) {
-                        if (!is_vlan_dev(vlan_upper))
-                                continue;
-
-                        if (vlan_upper == rt->dst.dev) {
-                                outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                rcu_read_unlock();
-                                goto found;
-                        }
-                        netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                          iter) {
-                                if (upper == rt->dst.dev) {
-                                        /* If the upper dev is a vlan dev too,
-                                         *  set the vlan tag to inner tag.
-                                         */
-                                        if (is_vlan_dev(upper)) {
-                                                inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                                inner.vlan_id = vlan_dev_vlan_id(upper);
-                                        }
-                                        outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                        outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                        rcu_read_unlock();
-                                        goto found;
-                                }
-                        }
-                }
-
-                /* if the device we're looking for is not on top of any of
-                 * our upper vlans, then just search for any dev that
-                 * matches, and in case it's a vlan - save the id
-                 */
-                netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                        if (upper == rt->dst.dev) {
-                                rcu_read_unlock();
-                                goto found;
-                        }
-                }
+                ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                 rcu_read_unlock();
 
+                if (ret)
+                        goto found;
+
                 /* Not our device - skip */
                 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                          bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
                 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                 ip_rt_put(rt);
                 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                              addr, &inner, &outer);
+                              addr, tags);
         }
 }
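The bond_main.c changes above replace the hand-rolled inner/outer pair with a recursive walk that records one VLAN tag per encapsulation level and a sender that pushes the collected tags innermost-first. The following user-space sketch models that two-part approach under stated assumptions: struct stacked_dev, collect_path_tags() and show_tags() are invented for the example, and the device names and tag protocols in main() are made up.

/* Sketch of the bond_verify_device_path()/bond_arp_send() tag handling:
 * walk the stacking tree from the bond towards the route's egress device,
 * store one tag per encapsulation level, then apply the tags from the
 * innermost (highest index) outwards.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_VLAN_ENCAP 2        /* mirrors BOND_MAX_VLAN_ENCAP */

struct vlan_tag { unsigned short proto, id; };

struct stacked_dev {
        const char *name;
        bool is_vlan;
        struct vlan_tag tag;
        int encap_level;                /* 0 = outermost tag on the wire */
        struct stacked_dev *uppers[4];
        int n_uppers;
};

static bool collect_path_tags(struct stacked_dev *start,
                              struct stacked_dev *end,
                              struct vlan_tag *tags)
{
        if (start == end)
                return true;
        for (int i = 0; i < start->n_uppers; i++) {
                struct stacked_dev *up = start->uppers[i];

                if (!collect_path_tags(up, end, tags))
                        continue;
                /* path found through 'up': record its tag on the way back */
                if (up->is_vlan) {
                        if (up->encap_level >= MAX_VLAN_ENCAP)
                                return false;   /* stacked too deep */
                        tags[up->encap_level] = up->tag;
                }
                return true;
        }
        return false;
}

static void show_tags(const struct vlan_tag *tags)
{
        /* inner tags (highest index) are pushed first, the outer tag last */
        for (int i = MAX_VLAN_ENCAP - 1; i >= 0; i--)
                if (tags[i].id)
                        printf("push %s tag: proto 0x%x vid %u\n",
                               i ? "inner" : "outer", tags[i].proto, tags[i].id);
}

int main(void)
{
        struct stacked_dev inner_vlan = { .name = "bond0.10.20", .is_vlan = true,
                                          .tag = { 0x8100, 20 }, .encap_level = 1 };
        struct stacked_dev outer_vlan = { .name = "bond0.10", .is_vlan = true,
                                          .tag = { 0x88a8, 10 }, .encap_level = 0,
                                          .uppers = { &inner_vlan }, .n_uppers = 1 };
        struct stacked_dev bond = { .name = "bond0",
                                    .uppers = { &outer_vlan }, .n_uppers = 1 };
        struct vlan_tag tags[MAX_VLAN_ENCAP];

        memset(tags, 0, sizeof(tags));
        if (collect_path_tags(&bond, &inner_vlan, tags))
                show_tags(tags);
        return 0;
}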
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b..83207029844 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
         { "off",     0,       BOND_VALFLAG_DEFAULT},
         { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+        { NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f..00bea320e3b 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
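The bond_options.c hunk adds the terminating entry that the other option tables already have; table walkers stop at the first entry with a NULL string, so a table without the sentinel is read past its end. A minimal sketch of that convention follows; struct opt_value and opt_find() only mirror the shape of the bonding option table and are not the driver's code.

/* Why bond_intmax_tbl needs a terminating entry: illustrative model. */
#include <stdio.h>
#include <limits.h>
#include <string.h>

struct opt_value {
        const char *string;
        long long value;
        unsigned int flags;
};

static const struct opt_value intmax_tbl[] = {
        { "off",    0,       0 },
        { "maxval", INT_MAX, 0 },
        { NULL,     -1,      0 }        /* sentinel: stops the walk below */
};

static const struct opt_value *opt_find(const struct opt_value *tbl,
                                        const char *name)
{
        for (; tbl->string; tbl++)      /* relies on the NULL sentinel */
                if (!strcmp(tbl->string, name))
                        return tbl;
        return NULL;
}

int main(void)
{
        const struct opt_value *v = opt_find(intmax_tbl, "maxval");

        printf("%s -> %lld\n", v ? v->string : "?", v ? v->value : -1);
        return 0;
}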
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f4..61ffc12d8fd 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
           SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
           boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-       bool "Force a strict RX CAN frame order (may cause frame loss)"
-       ---help---
-         The RX split buffer prevents packet reordering but can cause packet
-         loss. Only enable this option when you accept to lose CAN frames
-         in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
         tristate "Generic PCI Bus based C_CAN/D_CAN driver"
         depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a2ca820b537..95e04e2002d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
                                        struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-       else
-#endif
         c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
                                      struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               priv->rxmasked |= BIT(obj - 1);
-       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-               priv->rxmasked = 0;
-               /* activate all lower message objects */
-               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-       }
-#endif
         if (priv->type != BOSCH_D_CAN)
                 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
         u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       pend &= ~priv->rxmasked;
-#endif
         return pend;
 }
 
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
  * We clear the newdat bit right away.
  *
  * This can result in packet reordering when the readout is slow.
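With CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING removed, the c_can RX path always reads the NEWDAT pending mask directly and re-arms each message object as it is read, accepting possible reordering in exchange for not dropping frames. The following user-space model sketches that remaining behaviour under stated assumptions: the register is faked with a plain variable and get_pending()/read_and_rearm() are illustrative names, not driver functions.

/* Model of the simplified c_can RX servicing loop. */
#include <stdint.h>
#include <stdio.h>

static uint32_t newdat_reg = 0x0000002c;        /* objects 3, 4 and 6 pending */

static uint32_t get_pending(void)
{
        /* no rxmasked filtering any more - hardware state is used directly */
        return newdat_reg;
}

static void read_and_rearm(unsigned int obj)
{
        /* clearing NEWDAT right away re-arms the object immediately; if the
         * readout is slow this can reorder frames, which the driver now
         * tolerates instead of dropping frames
         */
        newdat_reg &= ~(1u << (obj - 1));
        printf("received frame from message object %u\n", obj);
}

int main(void)
{
        uint32_t pend = get_pending();

        while (pend) {
                unsigned int obj = __builtin_ctz(pend) + 1;     /* lowest first */

                read_and_rearm(obj);
                pend &= ~(1u << (obj - 1));
        }
        return 0;
}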
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3..564933ae218 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
         struct sja1000_priv *priv;
         struct peak_pci_chan *chan;
-        struct net_device *dev;
+        struct net_device *dev, *prev_dev;
         void __iomem *cfg_base, *reg_base;
         u16 sub_sys_id, icr;
         int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
         writew(0x0, cfg_base + PITA_ICR + 2);
 
         chan = NULL;
-        for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-                unregister_sja1000dev(dev);
-                free_sja1000dev(dev);
+        for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                 priv = netdev_priv(dev);
                 chan = priv->priv;
+                prev_dev = chan->prev_dev;
+
+                unregister_sja1000dev(dev);
+                free_sja1000dev(dev);
         }
 
         /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
         /* Loop over all registered devices */
         while (1) {
+                struct net_device *prev_dev = chan->prev_dev;
+
                 dev_info(&pdev->dev, "removing device %s\n", dev->name);
                 unregister_sja1000dev(dev);
                 free_sja1000dev(dev);
-                dev = chan->prev_dev;
+                dev = prev_dev;
 
                 if (!dev) {
                         /* do that only for first channel */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d1..d7401017a3f 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for EtherCAT master module located on CCAT FPGA
+         that can be found on Beckhoff CX5020, and possibly other of CX
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
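The peak_pci hunks above fix a use-after-free: the cleanup loops were reading chan->prev_dev after the device (and with it the private channel data) had already been freed. The generic pattern is to load the "previous" link before tearing the current node down. A minimal user-space sketch of that pattern follows; struct chan and unregister_and_free() stand in for the driver's chan->prev_dev chain and are not its API.

/* Teardown of a singly linked chain whose nodes are freed in the loop. */
#include <stdio.h>
#include <stdlib.h>

struct chan {
        struct chan *prev;
        int id;
};

static void unregister_and_free(struct chan *c)
{
        printf("removing channel %d\n", c->id);
        free(c);                        /* after this, c->prev is invalid */
}

int main(void)
{
        struct chan *head = NULL, *prev;

        for (int i = 0; i < 3; i++) {
                struct chan *c = malloc(sizeof(*c));

                if (!c)
                        return 1;
                c->id = i;
                c->prev = head;
                head = c;
        }

        while (head) {
                prev = head->prev;      /* save the link first ... */
                unregister_and_free(head);
                head = prev;            /* ... then advance */
        }
        return 0;
}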
+ source "drivers/net/ethernet/davicom/Kconfig" config DNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 545d0b3b9cb..35190e36c45 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_CX_ECAT) += ec_bhf.o obj-$(CONFIG_DM9000) += davicom/ obj-$(CONFIG_DNET) += dnet.o obj-$(CONFIG_NET_VENDOR_DEC) += dec/ diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile index d4a187e4536..3eff2fd3997 100644 --- a/drivers/net/ethernet/altera/Makefile +++ b/drivers/net/ethernet/altera/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_ALTERA_TSE) += altera_tse.o altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ altera_msgdma.o altera_sgdma.o altera_utils.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 4d1f2fdd5c3..0fb986ba329 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv) void msgdma_reset(struct altera_tse_private *priv) { int counter; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; /* Reset Rx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, + msgdma_csroffs(status)); + csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&rxcsr->status, + if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Rx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); /* Reset Tx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, + msgdma_csroffs(status)); + + csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&txcsr->status, + if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Tx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); } void msgdma_disable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - |