Diffstat (limited to 'drivers/net')
54 files changed, 2055 insertions, 528 deletions
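/*
 * Reviewer sketch (not part of the diff below): a minimal, standalone
 * illustration of the power-of-two ring-index helpers this series hoists
 * into be.h (MODULO(), index_inc(), queue_head_node()) so the new MCC queue
 * code can share them with the TX/RX paths.  Helper names are renamed for
 * this userspace example; only the masking trick itself is taken from the
 * patch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap val into [0, limit); limit must be a power of two (mirrors the
 * BUG_ON(limit & (limit - 1)) check in be.h's MODULO()). */
static inline uint32_t ring_modulo(uint16_t val, uint16_t limit)
{
	assert((limit & (limit - 1)) == 0);
	return val & (limit - 1);
}

static inline void ring_index_inc(uint16_t *index, uint16_t limit)
{
	*index = ring_modulo(*index + 1, limit);
}

int main(void)
{
	uint16_t head = 127;

	/* MCC_Q_LEN is raised from 64 to 128 in this series; incrementing
	 * past the last slot wraps the index back to 0. */
	ring_index_inc(&head, 128);
	printf("head after increment: %u\n", head);	/* prints 0 */
	return 0;
}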
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 892a9e4e275..c155bd3ec9f 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1725,6 +1725,7 @@ config TLAN config KS8842 tristate "Micrel KSZ8842" + depends on HAS_IOMEM help This platform driver is for Micrel KSZ8842 chip. @@ -2443,6 +2444,17 @@ config JME To compile this driver as a module, choose M here. The module will be called jme. +config S6GMAC + tristate "S6105 GMAC ethernet support" + depends on XTENSA_VARIANT_S6000 + select PHYLIB + help + This driver supports the on chip ethernet device on the + S6105 xtensa processor. + + To compile this driver as a module, choose M here. The module + will be called s6gmac. + endif # NETDEV_1000 # diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d366fb2b40e..4b58a59f211 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -245,6 +245,7 @@ obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o obj-$(CONFIG_DNET) += dnet.o obj-$(CONFIG_MACB) += macb.o +obj-$(CONFIG_S6GMAC) += s6gmac.o obj-$(CONFIG_ARM) += arm/ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c index e4afbd628c2..607007d75b6 100644 --- a/drivers/net/atl1c/atl1c_ethtool.c +++ b/drivers/net/atl1c/atl1c_ethtool.c @@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + return 0; } diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index 619c6583e1a..4003955d7a9 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c @@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + return 0; } diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index b4bb06fdf30..5b4bf3d2cdc 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define TX_CQ_LEN 1024 #define RX_Q_LEN 1024 /* Does not support any other value */ #define RX_CQ_LEN 1024 -#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */ +#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ #define MCC_CQ_LEN 256 #define BE_NAPI_WEIGHT 64 @@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) #define BE_MAX_LRO_DESCRIPTORS 16 -#define BE_MAX_FRAGS_PER_FRAME 16 +#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS)) struct be_dma_mem { void *va; @@ -91,6 +91,61 @@ struct be_queue_info { atomic_t used; /* Number of valid elements in the queue */ }; +static inline u32 MODULO(u16 val, u16 limit) +{ + BUG_ON(limit & (limit - 1)); + return val & (limit - 1); +} + +static inline void index_adv(u16 *index, u16 val, u16 limit) +{ + *index = MODULO((*index + val), limit); +} + +static inline void index_inc(u16 *index, u16 limit) +{ + *index = MODULO((*index + 1), limit); +} + +static inline void *queue_head_node(struct be_queue_info *q) +{ + return q->dma_mem.va + q->head * q->entry_size; +} + +static inline void *queue_tail_node(struct be_queue_info *q) +{ + return q->dma_mem.va + q->tail * q->entry_size; +} + +static inline void queue_head_inc(struct be_queue_info *q) +{ + index_inc(&q->head, q->len); +} + +static inline 
void queue_tail_inc(struct be_queue_info *q) +{ + index_inc(&q->tail, q->len); +} + + +struct be_eq_obj { + struct be_queue_info q; + char desc[32]; + + /* Adaptive interrupt coalescing (AIC) info */ + bool enable_aic; + u16 min_eqd; /* in usecs */ + u16 max_eqd; /* in usecs */ + u16 cur_eqd; /* in usecs */ + + struct napi_struct napi; +}; + +struct be_mcc_obj { + struct be_queue_info q; + struct be_queue_info cq; +}; + struct be_ctrl_info { u8 __iomem *csr; u8 __iomem *db; /* Door Bell */ @@ -98,11 +153,20 @@ struct be_ctrl_info { int pci_func; /* Mbox used for cmd request/response */ - spinlock_t cmd_lock; /* For serializing cmds to BE card */ + spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; /* Mbox mem is adjusted to align to 16 bytes. The allocated addr * is stored for freeing purpose */ struct be_dma_mem mbox_mem_alloced; + + /* MCC Rings */ + struct be_mcc_obj mcc_obj; + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ + spinlock_t mcc_cq_lock; + + /* MCC Async callback */ + void (*async_cb)(void *adapter, bool link_up); + void *adapter_ctxt; }; #include "be_cmds.h" @@ -150,19 +214,6 @@ struct be_stats_obj { struct be_dma_mem cmd; }; -struct be_eq_obj { - struct be_queue_info q; - char desc[32]; - - /* Adaptive interrupt coalescing (AIC) info */ - bool enable_aic; - u16 min_eqd; /* in usecs */ - u16 max_eqd; /* in usecs */ - u16 cur_eqd; /* in usecs */ - - struct napi_struct napi; -}; - struct be_tx_obj { struct be_queue_info q; struct be_queue_info cq; @@ -225,8 +276,9 @@ struct be_adapter { u32 if_handle; /* Used to configure filtering */ u32 pmac_id; /* MAC addr handle used by BE card */ - struct be_link_info link; + bool link_up; u32 port_num; + bool promiscuous; }; extern struct ethtool_ops be_ethtool_ops; @@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops; #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) -static inline u32 MODULO(u16 val, u16 limit) -{ - BUG_ON(limit & (limit - 1)); - return val & (limit - 1); -} - -static inline void index_adv(u16 *index, u16 val, u16 limit) -{ - *index = MODULO((*index + val), limit); -} - -static inline void index_inc(u16 *index, u16 limit) -{ - *index = MODULO((*index + 1), limit); -} - #define PAGE_SHIFT_4K 12 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) @@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb) return val; } +extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, + u16 num_popped); #endif /* BE_H */ diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index d444aed962b..583517ed56f 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -17,6 +17,133 @@ #include "be.h" +static void be_mcc_notify(struct be_ctrl_info *ctrl) +{ + struct be_queue_info *mccq = &ctrl->mcc_obj.q; + u32 val = 0; + + val |= mccq->id & DB_MCCQ_RING_ID_MASK; + val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; + iowrite32(val, ctrl->db + DB_MCCQ_OFFSET); +} + +/* To check if valid bit is set, check the entire word as we don't know + * the endianness of the data (old entry is host endian while a new entry is + * little endian) */ +static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl) +{ + if (compl->flags != 0) { + compl->flags = le32_to_cpu(compl->flags); + BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); + return true; + } else { + return false; + } +} + +/* Need to reset the entire word that houses the valid bit */ +static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl) +{ + 
compl->flags = 0; +} + +static int be_mcc_compl_process(struct be_ctrl_info *ctrl, + struct be_mcc_cq_entry *compl) +{ + u16 compl_status, extd_status; + + /* Just swap the status to host endian; mcc tag is opaquely copied + * from mcc_wrb */ + be_dws_le_to_cpu(compl, 4); + + compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & + CQE_STATUS_COMPL_MASK; + if (compl_status != MCC_STATUS_SUCCESS) { + extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & + CQE_STATUS_EXTD_MASK; + printk(KERN_WARNING DRV_NAME + " error in cmd completion: status(compl/extd)=%d/%d\n", + compl_status, extd_status); + return -1; + } + return 0; +} + +/* Link state evt is a string of bytes; no need for endian swapping */ +static void be_async_link_state_process(struct be_ctrl_info *ctrl, + struct be_async_event_link_state *evt) +{ + ctrl->async_cb(ctrl->adapter_ctxt, + evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false); +} + +static inline bool is_link_state_evt(u32 trailer) +{ + return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & + ASYNC_TRAILER_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_LINK_STATE); +} + +static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl) +{ + struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq; + struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq); + + if (be_mcc_compl_is_new(compl)) { + queue_tail_inc(mcc_cq); + return compl; + } + return NULL; +} + +void be_process_mcc(struct be_ctrl_info *ctrl) +{ + struct be_mcc_cq_entry *compl; + int num = 0; + + spin_lock_bh(&ctrl->mcc_cq_lock); + while ((compl = be_mcc_compl_get(ctrl))) { + if (compl->flags & CQE_FLAGS_ASYNC_MASK) { + /* Interpret flags as an async trailer */ + BUG_ON(!is_link_state_evt(compl->flags)); + + /* Interpret compl as a async link evt */ + be_async_link_state_process(ctrl, + (struct be_async_event_link_state *) compl); + } else { + be_mcc_compl_process(ctrl, compl); + atomic_dec(&ctrl->mcc_obj.q.used); + } + be_mcc_compl_use(compl); + num++; + } + if (num) + be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num); + spin_unlock_bh(&ctrl->mcc_cq_lock); +} + +/* Wait till no more pending mcc requests are present */ +static void be_mcc_wait_compl(struct be_ctrl_info *ctrl) +{ +#define mcc_timeout 50000 /* 5s timeout */ + int i; + for (i = 0; i < mcc_timeout; i++) { + be_process_mcc(ctrl); + if (atomic_read(&ctrl->mcc_obj.q.used) == 0) + break; + udelay(100); + } + if (i == mcc_timeout) + printk(KERN_WARNING DRV_NAME "mcc poll timed out\n"); +} + +/* Notify MCC requests and wait for completion */ +static void be_mcc_notify_wait(struct be_ctrl_info *ctrl) +{ + be_mcc_notify(ctrl); + be_mcc_wait_compl(ctrl); +} + static int be_mbox_db_ready_wait(void __iomem *db) { int cnt = 0, wait = 5; @@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db) /* * Insert the mailbox address into the doorbell in two steps + * Polls on the mbox doorbell till a command completion (or a timeout) occurs */ static int be_mbox_db_ring(struct be_ctrl_info *ctrl) { int status; - u16 compl_status, extd_status; u32 val = 0; void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; @@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl) if (status != 0) return status; - /* compl entry has been made now */ - be_dws_le_to_cpu(cqe, sizeof(*cqe)); - if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) { - printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n"); + /* A cq entry has been made now */ + if (be_mcc_compl_is_new(cqe)) { + status = 
be_mcc_compl_process(ctrl, &mbox->cqe); + be_mcc_compl_use(cqe); + if (status) + return status; + } else { + printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n"); return -1; } - - compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) & - CQE_STATUS_COMPL_MASK; - if (compl_status != MCC_STATUS_SUCCESS) { - extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) & - CQE_STATUS_EXTD_MASK; - printk(KERN_WARNING DRV_NAME - ": ERROR in cmd compl. status(compl/extd)=%d/%d\n", - compl_status, extd_status); - } - - return compl_status; + return 0; } static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) @@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; } +static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq) +{ + struct be_mcc_wrb *wrb = NULL; + if (atomic_read(&mccq->used) < mccq->len) { + wrb = queue_head_node(mccq); + queue_head_inc(mccq); + atomic_inc(&mccq->used); + memset(wrb, 0, sizeof(*wrb)); + } + return wrb; +} + int be_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_queue_info *eq, int eq_delay) { @@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem = &eq->dma_mem; int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl, eq->id = le16_to_cpu(resp->eq_id); eq->created = true; } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr, struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr, if (!status) memcpy(mac_addr, resp->mac.addr, ETH_ALEN); - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr, struct be_cmd_req_pmac_add *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr, *pmac_id = le32_to_cpu(resp->pmac_id); } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id) struct be_cmd_req_pmac_del *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id) req->pmac_id = cpu_to_le32(pmac_id); status = be_mbox_db_ring(ctrl); - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl, void *ctxt = &req->context; int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl, AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, 
sol_evts); AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); - AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0); + AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func); be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl, cq->id = le16_to_cpu(resp->cq_id); cq->created = true; } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); + + return status; +} + +static u32 be_encoded_q_len(int q_len) +{ + u32 len_encoded = fls(q_len); /* log2(len) + 1 */ + if (len_encoded == 16) + len_encoded = 0; + return len_encoded; +} + +int be_cmd_mccq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *mccq, + struct be_queue_info *cq) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_mcc_create *req = embedded_payload(wrb); + struct be_dma_mem *q_mem = &mccq->dma_mem; + void *ctxt = &req->context; + int status; + + spin_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MCC_CREATE, sizeof(*req)); + + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + + AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func); + AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); + mccq->created = true; + } + spin_unlock(&ctrl->mbox_lock); return status; } @@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl, int status; u32 len_encoded; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl, txq->id = le16_to_cpu(resp->cid); txq->created = true; } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem = &rxq->dma_mem; int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl, rxq->id = le16_to_cpu(resp->id); rxq->created = true; } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, u8 subsys = 0, opcode = 0; int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, subsys = CMD_SUBSYSTEM_ETH; opcode = OPCODE_ETH_RX_DESTROY; break; + case QTYPE_MCCQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_MCC_DESTROY; + break; default: printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); status = -1; @@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct 
be_ctrl_info *ctrl, struct be_queue_info *q, status = be_mbox_db_ring(ctrl); err: - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac, struct be_cmd_req_if_create *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac, *pmac_id = le32_to_cpu(resp->pmac_id); } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id) struct be_cmd_req_if_destroy *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id) req->interface_id = cpu_to_le32(interface_id); status = be_mbox_db_ring(ctrl); - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) struct be_sge *sge = nonembedded_sgl(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); memset(req, 0, sizeof(*req)); @@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } int be_cmd_link_status_query(struct be_ctrl_info *ctrl, - struct be_link_info *link) + bool *link_up) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_cmd_req_link_status *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); + + *link_up = false; memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl, status = be_mbox_db_ring(ctrl); if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); - link->speed = resp->mac_speed; - link->duplex = resp->mac_duplex; - link->fault = resp->mac_fault; - } else { - link->speed = PHY_LINK_SPEED_ZERO; + if (resp->mac_speed != PHY_LINK_SPEED_ZERO) + *link_up = true; } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver) struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver) strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd) struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd) status = 
be_mbox_db_ring(ctrl); - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array, struct be_cmd_req_vlan_config *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array, status = be_mbox_db_ring(ctrl); - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } +/* Use MCC for this command as it may be called in BH context */ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) { - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); - struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb); - int status; + struct be_mcc_wrb *wrb; + struct be_cmd_req_promiscuous_config *req; - spin_lock(&ctrl->cmd_lock); - memset(wrb, 0, sizeof(*wrb)); + spin_lock_bh(&ctrl->mcc_lock); + + wrb = wrb_from_mcc(&ctrl->mcc_obj.q); + BUG_ON(!wrb); + + req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) else req->port0_promiscuous = en; - status = be_mbox_db_ring(ctrl); + be_mcc_notify_wait(ctrl); - spin_unlock(&ctrl->cmd_lock); - return status; + spin_unlock_bh(&ctrl->mcc_lock); + return 0; } -int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, - u32 num, bool promiscuous) +/* + * Use MCC for this command as it may be called in BH context + * (mc == NULL) => multicast promiscous + */ +int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id, + struct dev_mc_list *mc_list, u32 mc_count) { - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); - struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb); - int status; +#define BE_MAX_MC 32 /* set mcast promisc if > 32 */ + struct be_mcc_wrb *wrb; + struct be_cmd_req_mcast_mac_config *req; - spin_lock(&ctrl->cmd_lock); - memset(wrb, 0, sizeof(*wrb)); + spin_lock_bh(&ctrl->mcc_lock); + + wrb = wrb_from_mcc(&ctrl->mcc_obj.q); + BUG_ON(!wrb); + + req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); req->interface_id = if_id; - req->promiscuous = promiscuous; - if (!promiscuous) { - req->num_mac = cpu_to_le16(num); - if (num) - memcpy(req->mac, mac_table, ETH_ALEN * num); + if (mc_list && mc_count <= BE_MAX_MC) { + int i; + struct dev_mc_list *mc; + + req->num_mac = cpu_to_le16(mc_count); + + for (mc = mc_list, i = 0; mc; mc = mc->next, i++) + memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); + } else { + req->promiscuous = 1; } - status = be_mbox_db_ring(ctrl); + be_mcc_notify_wait(ctrl); - spin_unlock(&ctrl->cmd_lock); - return status; + spin_unlock_bh(&ctrl->mcc_lock); + + return 0; } int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) @@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) status = 
be_mbox_db_ring(ctrl); - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc) struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc) *rx_fc = le16_to_cpu(resp->rx_flow_control); } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } @@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num) struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->cmd_lock); + spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num) *port_num = le32_to_cpu(resp->phys_port); } - spin_unlock(&ctrl->cmd_lock); + spin_unlock(&ctrl->mbox_lock); return status; } diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index e499e2d5b8c..747626da7b4 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -76,6 +76,34 @@ struct be_mcc_cq_entry { u32 flags; /* dword 3 */ }; +/* When the async bit of mcc_compl is set, the last 4 bytes of + * mcc_compl is interpreted as follows: + */ +#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ +#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF +#define ASYNC_EVENT_CODE_LINK_STATE 0x1 +struct be_async_event_trailer { + u32 code; +}; + +enum { + ASYNC_EVENT_LINK_DOWN = 0x0, + ASYNC_EVENT_LINK_UP = 0x1 +}; + +/* When the event code of an async trailer is link-state, the mcc_compl + * must be interpreted as follows + */ +struct be_async_event_link_state { + u8 physical_port; + u8 port_link_status; + u8 port_duplex; + u8 port_speed; + u8 port_fault; + u8 rsvd0[7]; + struct be_async_event_trailer trailer; +} __packed; + struct be_mcc_mailbox { struct be_mcc_wrb wrb; struct be_mcc_cq_entry cqe; @@ -101,6 +129,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_FIRMWARE_CONFIG 42 #define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 #define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 +#define OPCODE_COMMON_MCC_DESTROY 53 #define OPCODE_COMMON_CQ_DESTROY 54 #define OPCODE_COMMON_EQ_DESTROY 55 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 @@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create { u16 rsvd0; } __packed; +/******************** Create MCCQ ***************************/ +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_mcc_context { + u8 con_index[14]; + u8 rsvd0[2]; + u8 ring_size[4]; + u8 fetch_wrb; + u8 fetch_r2t; + u8 cq_id[10]; + u8 prod_index[14]; + u8 fid[8]; + u8 pdid[9]; + u8 valid; + u8 rsvd1[32]; + u8 rsvd2[32]; +} __packed; + +struct be_cmd_req_mcc_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u16 rsvd0; + u8 context[sizeof(struct amap_mcc_context) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_mcc_create { + struct be_cmd_resp_hdr hdr; + u16 id; + u16 rsvd0; +} __packed; + /******************** Create TxQ ***************************/ #define BE_ETH_TX_RING_TYPE_STANDARD 2 #define BE_ULP1_NUM 1 @@ -341,7 +402,8 @@ enum { QTYPE_EQ = 1, QTYPE_CQ, QTYPE_TXQ, - QTYPE_RXQ + QTYPE_RXQ, + QTYPE_MCCQ }; struct be_cmd_req_q_destroy { @@ -546,12 +608,6 @@ struct be_cmd_req_link_status { u32 rsvd; }; 
-struct be_link_info { - u8 duplex; - u8 speed; - u8 fault; -}; - enum { PHY_LINK_DUPLEX_NONE = 0x0, PHY_LINK_DUPLEX_HALF = 0x1, @@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *eq, bool sol_evts, bool no_delay, int num_cqe_dma_coalesce); +extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *mccq, + struct be_queue_info *cq); extern int be_cmd_txq_create(struct be_ctrl_info *ctrl, struct be_queue_info *txq, struct be_queue_info *cq); @@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl, extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, int type); extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl, - struct be_link_info *link); + bool *link_up); extern int be_cmd_reset(struct be_ctrl_info *ctrl); extern int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd); @@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, bool promiscuous); extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en); -extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, - u8 *mac_table, u32 num, bool promiscuous); +extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id, + struct dev_mc_list *mc_list, u32 mc_count); extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc); extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc); extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num); +extern void be_process_mcc(struct be_ctrl_info *ctrl); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9592f22e4c8..cccc5419ad7 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) return -EINVAL; adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; - if (adapter->max_rx_coal > MAX_SKB_FRAGS) - adapter->max_rx_coal = MAX_SKB_FRAGS - 1; + if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME) + adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; /* if AIC is being turned on now, start with an EQD of 0 */ if (rx_eq->enable_aic == 0 && diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index b132aa4893c..b02e805c1db 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h @@ -61,7 +61,7 @@ /* Clear the interrupt for this eq */ #define DB_EQ_CLR_SHIFT (9) /* bit 9 */ /* Must be 1 */ -#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ +#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ /* Number of event entries processed */ #define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ /* Rearm bit */ @@ -88,6 +88,12 @@ /* Number of rx frags posted */ #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ +/********** MCC door bell ************/ +#define DB_MCCQ_OFFSET 0x140 +#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */ +/* Number of entries posted */ +#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ + /* * BE descriptors: host memory data structures whose formats * are hardwired in BE silicon. 
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 66bb56874d9..308eb09ca56 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, return 0; } -static inline void *queue_head_node(struct be_queue_info *q) -{ - return q->dma_mem.va + q->head * q->entry_size; -} - -static inline void *queue_tail_node(struct be_queue_info *q) -{ - return q->dma_mem.va + q->tail * q->entry_size; -} - -static inline void queue_head_inc(struct be_queue_info *q) -{ - index_inc(&q->head, q->len); -} - -static inline void queue_tail_inc(struct be_queue_info *q) -{ - index_inc(&q->tail, q->len); -} - static void be_intr_set(struct be_ctrl_info *ctrl, bool enable) { u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; @@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid, iowrite32(val, ctrl->db + DB_EQ_OFFSET); } -static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, +void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, u16 num_popped) { u32 val = 0; @@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter) dev_stats->tx_window_errors = 0; } -static void be_link_status_update(struct be_adapter *adapter) +void be_link_status_update(void *ctxt, bool link_up) { - struct be_link_info *prev = &adapter->link; - struct be_link_info now = { 0 }; + struct be_adapter *adapter = ctxt; struct net_device *netdev = adapter->netdev; - be_cmd_link_status_query(&adapter->ctrl, &now); - /* If link came up or went down */ - if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO || - prev->speed == PHY_LINK_SPEED_ZERO)) { - if (now.speed == PHY_LINK_SPEED_ZERO) { - netif_stop_queue(netdev); - netif_carrier_off(netdev); - printk(KERN_INFO "%s: Link down\n", netdev->name); - } else { + if (adapter->link_up != link_up) { + if (link_up) { netif_start_queue(netdev); netif_carrier_on(netdev); printk(KERN_INFO "%s: Link up\n", netdev->name); + } else { + netif_stop_queue(netdev); + netif_carrier_off(netdev); + printk(KERN_INFO "%s: Link down\n", netdev->name); } + adapter->link_up = link_up; } - *prev = now; } /* Update the EQ delay n BE based on the RX frags consumed / sec */ @@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) be_vid_config(netdev); } -static void be_set_multicast_filter(struct net_device *netdev) +static void be_set_multicast_list(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; - u8 mac_addr[32][ETH_ALEN]; - int i = 0; + struct be_ctrl_info *ctrl = &adapter->ctrl; - if (netdev->flags & IFF_ALLMULTI) { - /* set BE in Multicast promiscuous */ - be_cmd_mcast_mac_set(&adapter->ctrl, - adapter->if_handle, NULL, 0, true); - return; + if (netdev->flags & IFF_PROMISC) { + be_cmd_promiscuous_config(ctrl, adapter->port_num, 1); + adapter->promiscuous = true; + goto done; } - for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { - memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN); - if (++i >= 32) { - be_cmd_mcast_mac_set(&adapter->ctrl, - adapter->if_handle, &mac_addr[0][0], i, false); - i = 0; - } - + /* BE was previously in promiscous mode; disable it */ + if (adapter->promiscuous) { + adapter->promiscuous = false; + be_cmd_promiscuous_config(ctrl, adapter->port_num, 0); } - if (i) { - /* reset the promiscuous mode also. 
*/ - be_cmd_mcast_mac_set(&adapter->ctrl, - adapter->if_handle, &mac_addr[0][0], i, false); + if (netdev->flags & IFF_ALLMULTI) { + be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0); + goto done; } -} - -static void be_set_multicast_list(struct net_device *netdev) -{ - struct be_adapter *adapter = netdev_priv(netdev); - if (netdev->flags & IFF_PROMISC) { - be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1); - } else { - be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0); - be_set_multicast_filter(netdev); - } + be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list, + netdev->mc_count); +done: + return; } static void be_rx_rate_update(struct be_adapter *adapter) @@ -705,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, { struct be_queue_info *rxq = &adapter->rx_obj.q; struct be_rx_page_info *page_info; - u16 rxq_idx, i, num_rcvd; + u16 rxq_idx, i, num_rcvd, j; u32 pktsize, hdr_len, curr_frag_len; u8 *start; @@ -748,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter, /* More frags present for this completion */ pktsize -= curr_frag_len; /* account for above copied frag */ - for (i = 1; i < num_rcvd; i++) { + for (i = 1, j = 0; i < num_rcvd; i++) { index_inc(&rxq_idx, rxq->len); page_info = get_rx_page_info(adapter, rxq_idx); curr_frag_len = min(pktsize, rx_frag_size); - skb_shinfo(skb)->frags[i].page = page_info->page; - skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; - skb_shinfo(skb)->frags[i].size = curr_frag_len; + /* Coalesce all frags from the same physical page in one slot */ + if (page_info->page_offset == 0) { + /* Fresh page */ + j++; + skb_shinfo(skb)->frags[j].page = page_info->page; + skb_shinfo(skb)->frags[j].page_offset = + page_info->page_offset; + skb_shinfo(skb)->frags[j].size = 0; + skb_shinfo(skb)->nr_frags++; + } else { + put_page(page_info->page); + } + + skb_shinfo(skb)->frags[j].size += curr_frag_len; skb->len += curr_frag_len; skb->data_len += curr_frag_len; - skb_shinfo(skb)->nr_frags++; pktsize -= curr_frag_len; memset(page_info, 0, sizeof(*page_info)); } + BUG_ON(j > MAX_SKB_FRAGS); done: be_rx_stats_update(adapter, pktsize, num_rcvd); @@ -825,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; struct be_queue_info *rxq = &adapter->rx_obj.q; u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; - u16 i, rxq_idx = 0, vid; + u16 i, rxq_idx = 0, vid, j; num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); @@ -833,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); remaining = pkt_size; - for (i = 0; i < num_rcvd; i++) { + for (i = 0, j = -1; i < num_rcvd; i++) { page_info = get_rx_page_info(adapter, rxq_idx); curr_frag_len = min(remaining, rx_frag_size); - rx_frags[i].page = page_info->page; - rx_frags[i].page_offset = page_info->page_offset; - rx_frags[i].size = curr_frag_len; - remaining -= curr_frag_len; + /* Coalesce all frags from the same physical page in one slot */ + if (i == 0 || page_info->page_offset == 0) { + /* First frag or Fresh page */ + j++; + rx_frags[j].page = page_info->page; + rx_frags[j].page_offset = page_info->page_offset; + rx_frags[j].size = 0; + } else { + put_page(page_info->page); + } + rx_frags[j].size += curr_frag_len; + remaining -= curr_frag_len; index_inc(&rxq_idx, rxq->len); - 
memset(page_info, 0, sizeof(*page_info)); } + BUG_ON(j > MAX_SKB_FRAGS); if (likely(!vlanf)) { lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, @@ -960,10 +940,8 @@ static void be_post_rx_frags(struct be_adapter *adapter) return; } -static struct be_eth_tx_compl * -be_tx_compl_get(struct be_adapter *adapter) +static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) { - struct be_queue_info *tx_cq = &adapter->tx_obj.cq; struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) @@ -1051,6 +1029,59 @@ static void be_tx_q_clean(struct be_adapter *adapter) } } +static void be_mcc_queues_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + struct be_ctrl_info *ctrl = &adapter->ctrl; + + q = &ctrl->mcc_obj.q; + if (q->created) + be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); + be_queue_free(adapter, q); + + q = &ctrl->mcc_obj.cq; + if (q->created) + be_cmd_q_destroy(ctrl, q, QTYPE_CQ); + be_queue_free(adapter, q); +} + +/* Must be called only after TX qs are created as MCC shares TX EQ */ +static int be_mcc_queues_create(struct be_adapter *adapter) +{ + struct be_queue_info *q, *cq; + struct be_ctrl_info *ctrl = &adapter->ctrl; + + /* Alloc MCC compl queue */ + cq = &ctrl->mcc_obj.cq; + if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, + sizeof(struct be_mcc_cq_entry))) + goto err; + + /* Ask BE to create MCC compl queue; share TX's eq */ + if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0)) + goto mcc_cq_free; + + /* Alloc MCC queue */ + q = &ctrl->mcc_obj.q; + if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) + goto mcc_cq_destroy; + + /* Ask BE to create MCC queue */ + if (be_cmd_mccq_create(ctrl, q, cq)) + goto mcc_q_free; + + return 0; + +mcc_q_free: + be_queue_free(adapter, q); +mcc_cq_destroy: + be_cmd_q_destroy(ctrl, cq, QTYPE_CQ); +mcc_cq_free: + be_queue_free(adapter, cq); +err: + return -1; +} + static void be_tx_queues_destroy(struct be_adapter *adapter) { struct be_queue_info *q; @@ -1263,7 +1294,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev) return IRQ_HANDLED; } -static irqreturn_t be_msix_tx(int irq, void *dev) +static irqreturn_t be_msix_tx_mcc(int irq, void *dev) { struct be_adapter *adapter = dev; @@ -1324,40 +1355,51 @@ int be_poll_rx(struct napi_struct *napi, int budget) return work_done; } -/* For TX we don't honour budget; consume everything */ -int be_poll_tx(struct napi_struct *napi, int budget) +void be_process_tx(struct be_adapter *adapter) { - struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); - struct be_adapter *adapter = - container_of(tx_eq, struct be_adapter, tx_eq); - struct be_tx_obj *tx_obj = &adapter->tx_obj; - struct be_queue_info *tx_cq = &tx_obj->cq; - struct be_queue_info *txq = &tx_obj->q; + struct be_queue_info *txq = &adapter->tx_obj.q; + struct be_queue_info *tx_cq = &adapter->tx_obj.cq; struct be_eth_tx_compl *txcp; u32 num_cmpl = 0; u16 end_idx; - while ((txcp = be_tx_compl_get(adapter))) { + while ((txcp = be_tx_compl_get(tx_cq))) { end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, wrb_index, txcp); be_tx_compl_process(adapter, end_idx); num_cmpl++; } - /* As Tx wrbs have been freed up, wake up netdev queue if - * it was stopped due to lack of tx wrbs. - */ - if (netif_queue_stopped(adapter->netdev) && + if (num_cmpl) { + be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); + + /* As Tx wrbs have been freed up, wake up netdev queue if + * it was stopped due to lack of tx wrbs. 
+ */ + if (netif_queue_stopped(adapter->netdev) && atomic_read(&txq->used) < txq->len / 2) { - netif_wake_queue(adapter->netdev); + netif_wake_queue(adapter->netdev); + } + + drvr_stats(adapter)->be_tx_events++; + drvr_stats(adapter)->be_tx_compl += num_cmpl; } +} + +/* As TX and MCC share the same EQ check for both TX and MCC completions. + * For TX/MCC we don't honour budget; consume everything + */ +static int be_poll_tx_mcc(struct napi_struct *napi, int budget) +{ + struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = + container_of(tx_eq, struct be_adapter, tx_eq); napi_complete(napi); - be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); + be_process_tx(adapter); - drvr_stats(adapter)->be_tx_events++; - drvr_stats(adapter)->be_tx_compl += num_cmpl; + be_process_mcc(&adapter->ctrl); return 1; } @@ -1368,9 +1410,6 @@ static void be_worker(struct work_struct *work) container_of(work, struct be_adapter, work.work); int status; - /* Check link */ - be_link_status_update(adapter); - /* Get Stats */ status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd); if (!status) @@ -1419,7 +1458,7 @@ static int be_msix_register(struct be_adapter *adapter) sprintf(tx_eq->desc, "%s-tx", netdev->name); vec = be_msix_vec_get(adapter, tx_eq->q.id); - status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter); + status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter); if (status) goto err; @@ -1495,6 +1534,39 @@ static int be_open(struct net_device *netdev) struct be_ctrl_info *ctrl = &adapter->ctrl; struct be_eq_obj *rx_eq = &adapter->rx_eq; struct be_eq_obj *tx_eq = &adapter->tx_eq; + bool link_up; + int status; + + /* First time posting */ + be_post_rx_frags(adapter); + + napi_enable(&rx_eq->napi); + napi_enable(&tx_eq->napi); + + be_irq_register(adapter); + + be_intr_set(ctrl, true); + + /* The evt queues are created in unarmed state; arm them */ + be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); + be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); + + /* Rx compl queue may be in unarmed state; rearm it */ + be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0); + + status = be_cmd_link_status_query(ctrl, &link_up); + if (status) + return status; + be_link_status_update(adapter, link_up); + + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); + return 0; +} + +static int be_setup(struct be_adapter *adapter) +{ + struct be_ctrl_info *ctrl = &adapter->ctrl; + struct net_device *netdev = adapter->netdev; u32 if_flags; int status; @@ -1521,29 +1593,14 @@ static int be_open(struct net_device *netdev) if (status != 0) goto tx_qs_destroy; - /* First time posting */ - be_post_rx_frags(adapter); - - napi_enable(&rx_eq->napi); - napi_enable(&tx_eq->napi); - - be_irq_register(adapter); - - be_intr_set(ctrl, true); - - /* The evt queues are created in the unarmed state; arm them */ - be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); - be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); - - /* The compl queues are created in the unarmed state; arm them */ - be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0); - be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0); - - be_link_status_update(adapter); + status = be_mcc_queues_create(adapter); + if (status != 0) + goto rx_qs_destroy; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); return 0; +rx_qs_destroy: + be_rx_queues_destroy(adapter); tx_qs_destroy: be_tx_queues_destroy(adapter); if_destroy: @@ -1552,6 +1609,19 @@ do_none: return status; } +static int be_clear(struct 
be_adapter *adapter) +{ + struct be_ctrl_info *ctrl = &adapter->ctrl; + + be_rx_queues_destroy(adapter); + be_tx_queues_destroy(adapter); + + be_cmd_if_destroy(ctrl, adapter->if_handle); + + be_mcc_queues_destroy(adapter); + return 0; +} + static int be_close(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -1564,7 +1634,7 @@ static int be_close(struct net_device *netdev) netif_stop_queue(netdev); netif_carrier_off(netdev); - adapter->link.speed = PHY_LINK_SPEED_ZERO; + adapter->link_up = false; be_intr_set(ctrl, false); @@ -1581,10 +1651,6 @@ static int be_close(struct net_device *netdev) napi_disable(&rx_eq->napi); napi_disable(&tx_eq->napi); - be_rx_queues_destroy(adapter); - be_tx_queues_destroy(adapter); - - be_cmd_if_destroy(ctrl, adapter->if_handle); return 0; } @@ -1673,7 +1739,7 @@ static void be_netdev_init(struct net_device *netdev) netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, BE_NAPI_WEIGHT); - netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, + netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, BE_NAPI_WEIGHT); netif_carrier_off(netdev); @@ -1755,7 +1821,12 @@ static int be_ctrl_init(struct be_adapter *adapter) mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); - spin_lock_init(&ctrl->cmd_lock); + spin_lock_init(&ctrl->mbox_lock); + spin_lock_init(&ctrl->mcc_lock); + spin_lock_init(&ctrl->mcc_cq_lock); + + ctrl->async_cb = be_link_status_update; + ctrl->adapter_ctxt = adapter; val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & @@ -1793,6 +1864,8 @@ static void __devexit be_remove(struct pci_dev *pdev) unregister_netdev(adapter->netdev); + be_clear(adapter); + be_stats_cleanup(adapter); be_ctrl_cleanup(adapter); @@ -1890,13 +1963,18 @@ static int __devinit be_probe(struct pci_dev *pdev, be_netdev_init(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + status = be_setup(adapter); + if (status) + goto stats_clean; status = register_netdev(netdev); if (status != 0) - goto stats_clean; + goto unsetup; dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); return 0; +unsetup: + be_clear(adapter); stats_clean: be_stats_cleanup(adapter); ctrl_clean: @@ -1921,6 +1999,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) if (netif_running(netdev)) { rtnl_lock(); be_close(netdev); + be_clear(adapter); rtnl_unlock(); } @@ -1947,6 +2026,7 @@ static int be_resume(struct pci_dev *pdev) if (netif_running(netdev)) { rtnl_lock(); + be_setup(adapter); be_open(netdev); rtnl_unlock(); } diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 38f1c3375d7..b70cc99962f 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev) return 0; } +static u32 +bnx2_get_link(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + return bp->link_up; +} + static int bnx2_get_eeprom_len(struct net_device *dev) { @@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = { .get_wol = bnx2_get_wol, .set_wol = bnx2_set_wol, .nway_reset = bnx2_nway_reset, - .get_link = ethtool_op_get_link, + .get_link = bnx2_get_link, .get_eeprom_len = bnx2_get_eeprom_len, .get_eeprom = bnx2_get_eeprom, .set_eeprom = bnx2_set_eeprom, diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index d5e18812bf4..33821a81cbf 100644 --- 
a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING If unsure, say Y. config CAN_SJA1000 - depends on CAN_DEV + depends on CAN_DEV && HAS_IOMEM tristate "Philips SJA1000" ---help--- Driver for the SJA1000 CAN controllers from Philips or NXP diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 44f77eb1180..4d1515f45ba 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -25,8 +25,6 @@ #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> -#include <linux/module.h> - #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define BCM_VLAN 1 #endif @@ -2521,9 +2519,9 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) struct cnic_dev *cdev; struct cnic_local *cp; struct cnic_eth_dev *ethdev = NULL; - struct cnic_eth_dev *(*probe)(void *) = NULL; + struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; - probe = __symbol_get("bnx2_cnic_probe"); + probe = symbol_get(bnx2_cnic_probe); if (probe) { ethdev = (*probe)(dev); symbol_put_addr(probe); diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 06380963a34..d1bce27ee99 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -296,4 +296,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); extern int cnic_unregister_driver(int ulp_type); +extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); + #endif diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 58afafbd3b9..fd5e32cbcb8 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = { .ndo_start_xmit = cpmac_start_xmit, .ndo_tx_timeout = cpmac_tx_timeout, .ndo_set_multicast_list = cpmac_set_multicast_list, - .ndo_so_ioctl = cpmac_ioctl, + .ndo_do_ioctl = cpmac_ioctl, .ndo_set_config = cpmac_config, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 677f60490f6..679885a122b 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); struct e1000_hw *hw = &adapter->hw; struct net_device *poll_dev = adapter->netdev; - int tx_cleaned = 0, work_done = 0; + int tx_cleaned = 1, work_done = 0; adapter = netdev_priv(poll_dev); diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 7d443405bbe..cc786333d95 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -948,7 +948,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) /* Start with safe defaults for link connection */ priv->speed = 100; priv->duplex = DUPLEX_HALF; - priv->mdio_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1; + priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->node) >> 20) / 5) << 1; /* The current speed preconfigures the speed of the MII link */ prop = of_get_property(op->node, "current-speed", &prop_size); diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index fec9f245116..31e6d62b785 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c @@ -106,7 +106,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, /* set MII speed */ out_be32(&priv->regs->mii_speed, - ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); + ((mpc5xxx_get_bus_frequency(of->node) >> 20) / 5) << 
1); err = of_mdiobus_register(bus, np); if (err) diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index 3af581303ca..d167090248e 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c @@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) } -#ifdef CONFIG_GIANFAR +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) { struct gfar __iomem *enet_regs; @@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) #endif -#ifdef CONFIG_UCC_GETH +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) { struct device_node *np = NULL; @@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, if (of_device_is_compatible(np, "fsl,gianfar-mdio") || of_device_is_compatible(np, "fsl,gianfar-tbi") || of_device_is_compatible(np, "gianfar")) { -#ifdef CONFIG_GIANFAR +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) tbipa = get_gfar_tbipa(regs); #else err = -ENODEV; @@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, #endif } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || of_device_is_compatible(np, "ucc_geth_phy")) { -#ifdef CONFIG_UCC_GETH +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) u32 id; static u32 mii_mng_master; diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 22aadb7884f..2bc9d63027d 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -1281,7 +1281,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* Setup the HW Tx Head and Tail descriptor pointers */ ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); tdba = tx_ring->dma; - ew32(TDBAL(0), (tdba & DMA_32BIT_MASK)); + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); ew32(TDBAH(0), (tdba >> 32)); ew32(TDH(0), 0); ew32(TDT(0), 0); @@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; - ew32(RDBAL(0), (rdba & DMA_32BIT_MASK)); + ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); ew32(RDBAH(0), (rdba >> 32)); ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); rx_ring->head = E1000_RDH(0); @@ -2628,15 +2628,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, return err; pci_using_dac = 0; - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!err) { - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (!err) pci_using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA " "configuration, aborting\n"); diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 3c3bf1f07b8..fa9f24e2368 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -251,7 +251,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, /* program DMA context */ hw = &adapter->hw; spin_lock_bh(&fcoe->lock); - IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_32BIT_MASK); + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, 
(u64)ddp->udp >> 32); IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index dc45e9856c3..6851bdb2ce2 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c @@ -14,6 +14,10 @@ #include <linux/mdio.h> #include <linux/module.h> +MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers"); +MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc."); +MODULE_LICENSE("GPL"); + /** * mdio45_probe - probe for an MDIO (clause 45) device * @mdio: MDIO interface diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index e02bafdd368..93f4abd990a 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev) queue_work(mdev->workqueue, &priv->mcast_task); priv->port_up = true; - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; mac_err: @@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev) en_dbg(DRV, priv, "stop port called while port already down\n"); return; } - netif_stop_queue(dev); /* Synchronize with tx routine */ netif_tx_lock_bh(dev); - priv->port_up = false; + netif_tx_stop_all_queues(dev); netif_tx_unlock_bh(dev); /* close port*/ + priv->port_up = false; mlx4_CLOSE_PORT(mdev->dev, priv->port); /* Unregister Mac address for the port */ @@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); cancel_delayed_work(&priv->stats_task); - cancel_delayed_work(&priv->refill_task); /* flush any pending task for this netdev */ flush_workqueue(mdev->workqueue); @@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); - INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 5a14899c1e2..91bdfdfd431 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -269,31 +269,6 @@ reduce_rings: return 0; } -static int mlx4_en_fill_rx_buf(struct net_device *dev, - struct mlx4_en_rx_ring *ring) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - int num = 0; - int err; - - while ((u32) (ring->prod - ring->cons) < ring->actual_size) { - err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod & - ring->size_mask); - if (err) { - if (netif_msg_rx_err(priv)) - en_warn(priv, "Failed preparing rx descriptor\n"); - priv->port_stats.rx_alloc_failed++; - break; - } - ++num; - ++ring->prod; - } - if ((u32) (ring->prod - ring->cons) == ring->actual_size) - ring->full = 1; - - return num; -} - static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { @@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, } } - -void mlx4_en_rx_refill(struct work_struct *work) -{ - struct delayed_work *delay = to_delayed_work(work); - struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, - refill_task); - struct mlx4_en_dev *mdev = priv->mdev; - struct net_device *dev = priv->dev; - struct mlx4_en_rx_ring *ring; - int need_refill = 0; - int i; - - mutex_lock(&mdev->state_lock); - if (!mdev->device_up || !priv->port_up) - goto out; - - /* We only get 
here if there are no receive buffers, so we can't race - * with Rx interrupts while filling buffers */ - for (i = 0; i < priv->rx_ring_num; i++) { - ring = &priv->rx_ring[i]; - if (ring->need_refill) { - if (mlx4_en_fill_rx_buf(dev, ring)) { - ring->need_refill = 0; - mlx4_en_update_rx_prod_db(ring); - } else - need_refill = 1; - } - } - if (need_refill) - queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ); - -out: - mutex_unlock(&mdev->state_lock); -} - - int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, u32 size, u16 stride) { @@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) ring_ind--; goto err_allocator; } - - /* Fill Rx buffers */ - ring->full = 0; } err = mlx4_en_fill_rx_buffers(priv); if (err) @@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, return skb; } -static void mlx4_en_copy_desc(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, - int from, int to, int num) -{ - struct skb_frag_struct *skb_frags_from; - struct skb_frag_struct *skb_frags_to; - struct mlx4_en_rx_desc *rx_desc_from; - struct mlx4_en_rx_desc *rx_desc_to; - int from_index, to_index; - int nr, i; - - for (i = 0; i < num; i++) { - from_index = (from + i) & ring->size_mask; - to_index = (to + i) & ring->size_mask; - skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info); - skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info); - rx_desc_from = ring->buf + (from_index << ring->log_stride); - rx_desc_to = ring->buf + (to_index << ring->log_stride); - - for (nr = 0; nr < priv->num_frags; nr++) { - skb_frags_to[nr].page = skb_frags_from[nr].page; - skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset; - rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr; - } - } -} - int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { @@ -821,11 +730,6 @@ out: wmb(); /* ensure HW sees CQ consumer before we post new buffers */ ring->cons = cq->mcq.cons_index; ring->prod += polled; /* Polled descriptors were realocated in place */ - if (unlikely(!ring->full)) { - mlx4_en_copy_desc(priv, ring, ring->cons - polled, - ring->prod - polled, polled); - mlx4_en_fill_rx_buf(dev, ring); - } mlx4_en_update_rx_prod_db(ring); return polled; } diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 5dc7466ad03..08c43f2ae72 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev, else { if (netif_msg_tx_err(priv)) en_warn(priv, "Non-linear headers\n"); - dev_kfree_skb_any(skb); return 0; } } - if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) { - if (netif_msg_tx_err(priv)) - en_warn(priv, "LSO header size too big\n"); - dev_kfree_skb_any(skb); - return 0; - } } else { *lso_header_size = 0; if (!is_inline(skb, NULL)) @@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) int lso_header_size; void *fragptr; - if (unlikely(!skb->len)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } real_size = get_real_size(skb, dev, &lso_header_size); if (unlikely(!real_size)) - return NETDEV_TX_OK; + goto tx_drop; /* Allign descriptor to TXBB size */ desc_size = ALIGN(real_size, TXBB_SIZE); @@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { if (netif_msg_tx_err(priv)) en_warn(priv, "Oversized header or SG list\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + 
goto tx_drop; } tx_ind = skb->queue_mapping; @@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - /* Now that we know what Tx ring to use */ - if (unlikely(!priv->port_up)) { - if (netif_msg_tx_err(priv)) - en_warn(priv, "xmit: port down!\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* Track current inflight packets for performance analysis */ AVG_PERF_COUNTER(priv->pstats.inflight_avg, (u32) (ring->prod - ring->cons - 1)); @@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) mlx4_en_xmit_poll(priv, tx_ind); return 0; + +tx_drop: + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; } diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index d43a9e4c2ae..c7c5e86804f 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h @@ -99,7 +99,6 @@ #define RSS_FACTOR 2 #define TXBB_SIZE 64 #define HEADROOM (2048 / TXBB_SIZE + 1) -#define MAX_LSO_HDR_SIZE 92 #define STAMP_STRIDE 64 #define STAMP_DWORDS (STAMP_STRIDE / 4) #define STAMP_SHIFT 31 @@ -296,8 +295,6 @@ struct mlx4_en_rx_ring { u32 prod; u32 cons; u32 buf_size; - int need_refill; - int full; void *buf; void *rx_info; unsigned long bytes; @@ -495,7 +492,6 @@ struct mlx4_en_priv { struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; struct work_struct mcast_task; struct work_struct mac_task; - struct delayed_work refill_task; struct work_struct watchdog_task; struct work_struct linkstate_task; struct delayed_work stats_task; @@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); -void mlx4_en_rx_refill(struct work_struct *work); void mlx4_en_rx_irq(struct mlx4_cq *mcq); int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 5887e4764d2..f96948be0a4 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -399,11 +399,14 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, if (!mtts) return -ENOMEM; + dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, + npages * sizeof (u64), DMA_TO_DEVICE); + for (i = 0; i < npages; ++i) mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); - dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, - npages * sizeof (u64), DMA_TO_DEVICE); + dma_sync_single_for_device(&dev->pdev->dev, dma_handle, + npages * sizeof (u64), DMA_TO_DEVICE); return 0; } @@ -547,11 +550,14 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list /* Make sure MPT status is visible before writing MTT entries */ wmb(); + dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); + for (i = 0; i < npages; ++i) fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); - dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, - npages * sizeof(u64), DMA_TO_DEVICE); + dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); fmr->mpt->key = cpu_to_be32(key); fmr->mpt->lkey = cpu_to_be32(key); diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 745ae8b4a2e..0f32db3e92a 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1750,12 +1750,12 @@ static void 
mv643xx_eth_program_unicast_filter(struct net_device *dev) uc_addr_set(mp, dev->dev_addr); - port_config = rdlp(mp, PORT_CONFIG); + port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; + nibbles = uc_addr_filter_mask(dev); if (!nibbles) { port_config |= UNICAST_PROMISCUOUS_MODE; - wrlp(mp, PORT_CONFIG, port_config); - return; + nibbles = 0xffff; } for (i = 0; i < 16; i += 4) { @@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev) wrl(mp, off, v); } - port_config &= ~UNICAST_PROMISCUOUS_MODE; wrlp(mp, PORT_CONFIG, port_config); } diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index bdb143d2b5c..055bb61d6e7 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) u32 val = 0; int retries = 60; - if (!pegtune_val) { - do { - val = NXRD32(adapter, CRB_CMDPEG_STATE); + if (pegtune_val) + return 0; - if (val == PHAN_INITIALIZE_COMPLETE || - val == PHAN_INITIALIZE_ACK) - return 0; + do { + val = NXRD32(adapter, CRB_CMDPEG_STATE); - msleep(500); + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } - } while (--retries); + msleep(500); - if (!retries) { - pegtune_val = NXRD32(adapter, - NETXEN_ROMUSB_GLB_PEGTUNE_DONE); - printk(KERN_WARNING "netxen_phantom_init: init failed, " - "pegtune_val=%x\n", pegtune_val); - return -1; - } - } + } while (--retries); - return 0; + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + +out_err: + dev_warn(&adapter->pdev->dev, "firmware init failed\n"); + return -EIO; } static int diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 71daa3d5f11..2919a2d12bf 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) first_driver = (adapter->ahw.pci_func == 0); if (!first_driver) - return 0; + goto wait_init; first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); @@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | (_NETXEN_NIC_LINUX_SUBVERSION); NXWR32(adapter, CRB_DRIVER_VERSION, val); +wait_init: /* Handshake with the card before we register the devices. 
*/ err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); if (err) { @@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) free_netdev(netdev); } +#ifdef CONFIG_PM static int netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) { @@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev) return 0; } +#endif static int netxen_nic_open(struct net_device *netdev) { @@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = { .id_table = netxen_pci_tbl, .probe = netxen_nic_probe, .remove = __devexit_p(netxen_nic_remove), +#ifdef CONFIG_PM .suspend = netxen_nic_suspend, .resume = netxen_nic_resume +#endif }; /* Driver Registration on NetXen card */ diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 6de8399d6dd..17c116bb332 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -356,7 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, if (!skb_queue_empty(&ap->rqueue)) tasklet_schedule(&ap->tsk); ap_put(ap); - tty_unthrottle(tty); } static void diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index d2fa2db1358..aa3d39f38e2 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -397,7 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, if (!skb_queue_empty(&ap->rqueue)) tasklet_schedule(&ap->tsk); sp_put(ap); - tty_unthrottle(tty); } static void diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index 2b38f39924a..d1a5fb4d6ac 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -214,9 +214,10 @@ static void gelic_card_free_chain(struct gelic_card *card, * * returns 0 on success, <0 on failure */ -static int gelic_card_init_chain(struct gelic_card *card, - struct gelic_descr_chain *chain, - struct gelic_descr *start_descr, int no) +static int __devinit gelic_card_init_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr, + int no) { int i; struct gelic_descr *descr; @@ -407,7 +408,7 @@ rewind: * * returns 0 on success, < 0 on failure */ -static int gelic_card_alloc_rx_skbs(struct gelic_card *card) +static int __devinit gelic_card_alloc_rx_skbs(struct gelic_card *card) { struct gelic_descr_chain *chain; int ret; @@ -1422,8 +1423,8 @@ static const struct net_device_ops gelic_netdevice_ops = { * * fills out function pointers in the net_device structure */ -static void gelic_ether_setup_netdev_ops(struct net_device *netdev, - struct napi_struct *napi) +static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev, + struct napi_struct *napi) { netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; /* NAPI */ @@ -1443,7 +1444,8 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, * gelic_ether_setup_netdev initializes the net_device structure * and register it. 
**/ -int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) +int __devinit gelic_net_setup_netdev(struct net_device *netdev, + struct gelic_card *card) { int status; u64 v1, v2; @@ -1491,7 +1493,7 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) * the card and net_device structures are linked to each other */ #define GELIC_ALIGN (32) -static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev) +static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **netdev) { struct gelic_card *card; struct gelic_port *port; @@ -1542,7 +1544,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev) return card; } -static void gelic_card_get_vlan_info(struct gelic_card *card) +static void __devinit gelic_card_get_vlan_info(struct gelic_card *card) { u64 v1, v2; int status; @@ -1616,7 +1618,7 @@ static void gelic_card_get_vlan_info(struct gelic_card *card) /** * ps3_gelic_driver_probe - add a device to the control of this driver */ -static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) +static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) { struct gelic_card *card; struct net_device *netdev; diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index 4f3ada622f9..b6b3ca9bdb2 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -2442,7 +2442,7 @@ static const struct iw_handler_def gelic_wl_wext_handler_def = { #endif }; -static struct net_device *gelic_wl_alloc(struct gelic_card *card) +static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card) { struct net_device *netdev; struct gelic_port *port; @@ -2722,7 +2722,7 @@ static struct ethtool_ops gelic_wl_ethtool_ops = { .set_rx_csum = gelic_net_set_rx_csum, }; -static void gelic_wl_setup_netdev_ops(struct net_device *netdev) +static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev) { struct gelic_wl_info *wl; wl = port_wl(netdev_priv(netdev)); @@ -2738,7 +2738,7 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev) /* * driver probe/remove */ -int gelic_wl_driver_probe(struct gelic_card *card) +int __devinit gelic_wl_driver_probe(struct gelic_card *card) { int ret; struct net_device *netdev; diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 8a823ecc99a..3e4b67aaa6e 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) (void __iomem *)port_regs; u32 delay = 10; int status = 0; + unsigned long hw_flags = 0; if(ql_mii_setup(qdev)) return -1; @@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, (ISP_SERIAL_PORT_IF_WE | (ISP_SERIAL_PORT_IF_WE << 16))); - + /* Give the PHY time to come out of reset. 
*/ + mdelay(100); qdev->port_link_state = LS_DOWN; netif_carrier_off(qdev->ndev); @@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) value = ql_read_page0_reg(qdev, &port_regs->portStatus); if (value & PORT_STATUS_IC) break; + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); msleep(500); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); } while (--delay); if (delay == 0) { @@ -3837,7 +3841,9 @@ static void ql_reset_work(struct work_struct *work) 16) | ISP_CONTROL_RI)); } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ssleep(1); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); } while (--max_wait_time); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 4e22462684c..4b53b58d75f 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -51,9 +51,6 @@ #define TX_BUFFS_AVAIL(tp) \ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) -/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ -static const int max_interrupt_work = 20; - /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c new file mode 100644 index 00000000000..5345e47b35a --- /dev/null +++ b/drivers/net/s6gmac.c @@ -0,0 +1,1073 @@ +/* + * Ethernet driver for S6105 on chip network device + * (c)2008 emlix GmbH http://www.emlix.com + * Authors: Oskar Schirmer <os@emlix.com> + * Daniel Gloeckner <dg@emlix.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if.h> +#include <linux/stddef.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <variant/hardware.h> +#include <variant/dmac.h> + +#define DRV_NAME "s6gmac" +#define DRV_PRMT DRV_NAME ": " + + +/* register declarations */ + +#define S6_GMAC_MACCONF1 0x000 +#define S6_GMAC_MACCONF1_TXENA 0 +#define S6_GMAC_MACCONF1_SYNCTX 1 +#define S6_GMAC_MACCONF1_RXENA 2 +#define S6_GMAC_MACCONF1_SYNCRX 3 +#define S6_GMAC_MACCONF1_TXFLOWCTRL 4 +#define S6_GMAC_MACCONF1_RXFLOWCTRL 5 +#define S6_GMAC_MACCONF1_LOOPBACK 8 +#define S6_GMAC_MACCONF1_RESTXFUNC 16 +#define S6_GMAC_MACCONF1_RESRXFUNC 17 +#define S6_GMAC_MACCONF1_RESTXMACCTRL 18 +#define S6_GMAC_MACCONF1_RESRXMACCTRL 19 +#define S6_GMAC_MACCONF1_SIMULRES 30 +#define S6_GMAC_MACCONF1_SOFTRES 31 +#define S6_GMAC_MACCONF2 0x004 +#define S6_GMAC_MACCONF2_FULL 0 +#define S6_GMAC_MACCONF2_CRCENA 1 +#define S6_GMAC_MACCONF2_PADCRCENA 2 +#define S6_GMAC_MACCONF2_LENGTHFCHK 4 +#define S6_GMAC_MACCONF2_HUGEFRAMENA 5 +#define S6_GMAC_MACCONF2_IFMODE 8 +#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1 +#define S6_GMAC_MACCONF2_IFMODE_BYTE 2 +#define S6_GMAC_MACCONF2_IFMODE_MASK 3 +#define S6_GMAC_MACCONF2_PREAMBLELEN 12 +#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F +#define S6_GMAC_MACIPGIFG 0x008 +#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0 +#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F +#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8 +#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16 +#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24 +#define S6_GMAC_MACHALFDUPLEX 0x00C +#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0 +#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F +#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12 +#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F +#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16 +#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17 +#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18 +#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19 +#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20 +#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F +#define S6_GMAC_MACMAXFRAMELEN 0x010 +#define S6_GMAC_MACMIICONF 0x020 +#define S6_GMAC_MACMIICONF_CSEL 0 +#define S6_GMAC_MACMIICONF_CSEL_DIV10 0 +#define S6_GMAC_MACMIICONF_CSEL_DIV12 1 +#define S6_GMAC_MACMIICONF_CSEL_DIV14 2 +#define S6_GMAC_MACMIICONF_CSEL_DIV18 3 +#define S6_GMAC_MACMIICONF_CSEL_DIV24 4 +#define S6_GMAC_MACMIICONF_CSEL_DIV34 5 +#define S6_GMAC_MACMIICONF_CSEL_DIV68 6 +#define S6_GMAC_MACMIICONF_CSEL_DIV168 7 +#define S6_GMAC_MACMIICONF_CSEL_MASK 7 +#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4 +#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5 +#define S6_GMAC_MACMIICMD 0x024 +#define S6_GMAC_MACMIICMD_READ 0 +#define S6_GMAC_MACMIICMD_SCAN 1 +#define S6_GMAC_MACMIIADDR 0x028 +#define S6_GMAC_MACMIIADDR_REG 0 +#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F +#define S6_GMAC_MACMIIADDR_PHY 8 +#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F +#define S6_GMAC_MACMIICTRL 0x02C +#define S6_GMAC_MACMIISTAT 0x030 +#define S6_GMAC_MACMIIINDI 0x034 +#define S6_GMAC_MACMIIINDI_BUSY 0 +#define S6_GMAC_MACMIIINDI_SCAN 1 +#define S6_GMAC_MACMIIINDI_INVAL 2 +#define S6_GMAC_MACINTERFSTAT 0x03C +#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3 +#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9 +#define S6_GMAC_MACSTATADDR1 0x040 +#define S6_GMAC_MACSTATADDR2 0x044 + +#define 
S6_GMAC_FIFOCONF0 0x048 +#define S6_GMAC_FIFOCONF0_HSTRSTWT 0 +#define S6_GMAC_FIFOCONF0_HSTRSTSR 1 +#define S6_GMAC_FIFOCONF0_HSTRSTFR 2 +#define S6_GMAC_FIFOCONF0_HSTRSTST 3 +#define S6_GMAC_FIFOCONF0_HSTRSTFT 4 +#define S6_GMAC_FIFOCONF0_WTMENREQ 8 +#define S6_GMAC_FIFOCONF0_SRFENREQ 9 +#define S6_GMAC_FIFOCONF0_FRFENREQ 10 +#define S6_GMAC_FIFOCONF0_STFENREQ 11 +#define S6_GMAC_FIFOCONF0_FTFENREQ 12 +#define S6_GMAC_FIFOCONF0_WTMENRPLY 16 +#define S6_GMAC_FIFOCONF0_SRFENRPLY 17 +#define S6_GMAC_FIFOCONF0_FRFENRPLY 18 +#define S6_GMAC_FIFOCONF0_STFENRPLY 19 +#define S6_GMAC_FIFOCONF0_FTFENRPLY 20 +#define S6_GMAC_FIFOCONF1 0x04C +#define S6_GMAC_FIFOCONF2 0x050 +#define S6_GMAC_FIFOCONF2_CFGLWM 0 +#define S6_GMAC_FIFOCONF2_CFGHWM 16 +#define S6_GMAC_FIFOCONF3 0x054 +#define S6_GMAC_FIFOCONF3_CFGFTTH 0 +#define S6_GMAC_FIFOCONF3_CFGHWMFT 16 +#define S6_GMAC_FIFOCONF4 0x058 +#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0 +#define S6_GMAC_FIFOCONF_RSV_RUNT 1 +#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2 +#define S6_GMAC_FIFOCONF_RSV_CODEERR 3 +#define S6_GMAC_FIFOCONF_RSV_CRCERR 4 +#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5 +#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6 +#define S6_GMAC_FIFOCONF_RSV_OK 7 +#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8 +#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9 +#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10 +#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11 +#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12 +#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13 +#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14 +#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15 +#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16 +#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF +#define S6_GMAC_FIFOCONF5 0x05C +#define S6_GMAC_FIFOCONF5_DROPLT64 18 +#define S6_GMAC_FIFOCONF5_CFGBYTM 19 +#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20 +#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF + +#define S6_GMAC_STAT_REGS 0x080 +#define S6_GMAC_STAT_SIZE_MIN 12 +#define S6_GMAC_STATTR64 0x080 +#define S6_GMAC_STATTR64_SIZE 18 +#define S6_GMAC_STATTR127 0x084 +#define S6_GMAC_STATTR127_SIZE 18 +#define S6_GMAC_STATTR255 0x088 +#define S6_GMAC_STATTR255_SIZE 18 +#define S6_GMAC_STATTR511 0x08C +#define S6_GMAC_STATTR511_SIZE 18 +#define S6_GMAC_STATTR1K 0x090 +#define S6_GMAC_STATTR1K_SIZE 18 +#define S6_GMAC_STATTRMAX 0x094 +#define S6_GMAC_STATTRMAX_SIZE 18 +#define S6_GMAC_STATTRMGV 0x098 +#define S6_GMAC_STATTRMGV_SIZE 18 +#define S6_GMAC_STATRBYT 0x09C +#define S6_GMAC_STATRBYT_SIZE 24 +#define S6_GMAC_STATRPKT 0x0A0 +#define S6_GMAC_STATRPKT_SIZE 18 +#define S6_GMAC_STATRFCS 0x0A4 +#define S6_GMAC_STATRFCS_SIZE 12 +#define S6_GMAC_STATRMCA 0x0A8 +#define S6_GMAC_STATRMCA_SIZE 18 +#define S6_GMAC_STATRBCA 0x0AC +#define S6_GMAC_STATRBCA_SIZE 22 +#define S6_GMAC_STATRXCF 0x0B0 +#define S6_GMAC_STATRXCF_SIZE 18 +#define S6_GMAC_STATRXPF 0x0B4 +#define S6_GMAC_STATRXPF_SIZE 12 +#define S6_GMAC_STATRXUO 0x0B8 +#define S6_GMAC_STATRXUO_SIZE 12 +#define S6_GMAC_STATRALN 0x0BC +#define S6_GMAC_STATRALN_SIZE 12 +#define S6_GMAC_STATRFLR 0x0C0 +#define S6_GMAC_STATRFLR_SIZE 16 +#define S6_GMAC_STATRCDE 0x0C4 +#define S6_GMAC_STATRCDE_SIZE 12 +#define S6_GMAC_STATRCSE 0x0C8 +#define S6_GMAC_STATRCSE_SIZE 12 +#define S6_GMAC_STATRUND 0x0CC +#define S6_GMAC_STATRUND_SIZE 12 +#define S6_GMAC_STATROVR 0x0D0 +#define S6_GMAC_STATROVR_SIZE 12 +#define S6_GMAC_STATRFRG 0x0D4 +#define S6_GMAC_STATRFRG_SIZE 12 +#define S6_GMAC_STATRJBR 0x0D8 +#define S6_GMAC_STATRJBR_SIZE 12 +#define S6_GMAC_STATRDRP 0x0DC +#define S6_GMAC_STATRDRP_SIZE 12 +#define S6_GMAC_STATTBYT 0x0E0 +#define S6_GMAC_STATTBYT_SIZE 24 
+#define S6_GMAC_STATTPKT 0x0E4 +#define S6_GMAC_STATTPKT_SIZE 18 +#define S6_GMAC_STATTMCA 0x0E8 +#define S6_GMAC_STATTMCA_SIZE 18 +#define S6_GMAC_STATTBCA 0x0EC +#define S6_GMAC_STATTBCA_SIZE 18 +#define S6_GMAC_STATTXPF 0x0F0 +#define S6_GMAC_STATTXPF_SIZE 12 +#define S6_GMAC_STATTDFR 0x0F4 +#define S6_GMAC_STATTDFR_SIZE 12 +#define S6_GMAC_STATTEDF 0x0F8 +#define S6_GMAC_STATTEDF_SIZE 12 +#define S6_GMAC_STATTSCL 0x0FC +#define S6_GMAC_STATTSCL_SIZE 12 +#define S6_GMAC_STATTMCL 0x100 +#define S6_GMAC_STATTMCL_SIZE 12 +#define S6_GMAC_STATTLCL 0x104 +#define S6_GMAC_STATTLCL_SIZE 12 +#define S6_GMAC_STATTXCL 0x108 +#define S6_GMAC_STATTXCL_SIZE 12 +#define S6_GMAC_STATTNCL 0x10C +#define S6_GMAC_STATTNCL_SIZE 13 +#define S6_GMAC_STATTPFH 0x110 +#define S6_GMAC_STATTPFH_SIZE 12 +#define S6_GMAC_STATTDRP 0x114 +#define S6_GMAC_STATTDRP_SIZE 12 +#define S6_GMAC_STATTJBR 0x118 +#define S6_GMAC_STATTJBR_SIZE 12 +#define S6_GMAC_STATTFCS 0x11C +#define S6_GMAC_STATTFCS_SIZE 12 +#define S6_GMAC_STATTXCF 0x120 +#define S6_GMAC_STATTXCF_SIZE 12 +#define S6_GMAC_STATTOVR 0x124 +#define S6_GMAC_STATTOVR_SIZE 12 +#define S6_GMAC_STATTUND 0x128 +#define S6_GMAC_STATTUND_SIZE 12 +#define S6_GMAC_STATTFRG 0x12C +#define S6_GMAC_STATTFRG_SIZE 12 +#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n)) +#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n)) +#define S6_GMAC_STATCARRY1_RDRP 0 +#define S6_GMAC_STATCARRY1_RJBR 1 +#define S6_GMAC_STATCARRY1_RFRG 2 +#define S6_GMAC_STATCARRY1_ROVR 3 +#define S6_GMAC_STATCARRY1_RUND 4 +#define S6_GMAC_STATCARRY1_RCSE 5 +#define S6_GMAC_STATCARRY1_RCDE 6 +#define S6_GMAC_STATCARRY1_RFLR 7 +#define S6_GMAC_STATCARRY1_RALN 8 +#define S6_GMAC_STATCARRY1_RXUO 9 +#define S6_GMAC_STATCARRY1_RXPF 10 +#define S6_GMAC_STATCARRY1_RXCF 11 +#define S6_GMAC_STATCARRY1_RBCA 12 +#define S6_GMAC_STATCARRY1_RMCA 13 +#define S6_GMAC_STATCARRY1_RFCS 14 +#define S6_GMAC_STATCARRY1_RPKT 15 +#define S6_GMAC_STATCARRY1_RBYT 16 +#define S6_GMAC_STATCARRY1_TRMGV 25 +#define S6_GMAC_STATCARRY1_TRMAX 26 +#define S6_GMAC_STATCARRY1_TR1K 27 +#define S6_GMAC_STATCARRY1_TR511 28 +#define S6_GMAC_STATCARRY1_TR255 29 +#define S6_GMAC_STATCARRY1_TR127 30 +#define S6_GMAC_STATCARRY1_TR64 31 +#define S6_GMAC_STATCARRY2_TDRP 0 +#define S6_GMAC_STATCARRY2_TPFH 1 +#define S6_GMAC_STATCARRY2_TNCL 2 +#define S6_GMAC_STATCARRY2_TXCL 3 +#define S6_GMAC_STATCARRY2_TLCL 4 +#define S6_GMAC_STATCARRY2_TMCL 5 +#define S6_GMAC_STATCARRY2_TSCL 6 +#define S6_GMAC_STATCARRY2_TEDF 7 +#define S6_GMAC_STATCARRY2_TDFR 8 +#define S6_GMAC_STATCARRY2_TXPF 9 +#define S6_GMAC_STATCARRY2_TBCA 10 +#define S6_GMAC_STATCARRY2_TMCA 11 +#define S6_GMAC_STATCARRY2_TPKT 12 +#define S6_GMAC_STATCARRY2_TBYT 13 +#define S6_GMAC_STATCARRY2_TFRG 14 +#define S6_GMAC_STATCARRY2_TUND 15 +#define S6_GMAC_STATCARRY2_TOVR 16 +#define S6_GMAC_STATCARRY2_TXCF 17 +#define S6_GMAC_STATCARRY2_TFCS 18 +#define S6_GMAC_STATCARRY2_TJBR 19 + +#define S6_GMAC_HOST_PBLKCTRL 0x140 +#define S6_GMAC_HOST_PBLKCTRL_TXENA 0 +#define S6_GMAC_HOST_PBLKCTRL_RXENA 1 +#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2 +#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3 +#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8 +#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12 +#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4 +#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5 +#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6 +#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7 +#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF +#define S6_GMAC_HOST_PBLKCTRL_STATENA 16 +#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17 +#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18 +#define 
S6_GMAC_HOST_PBLKCTRL_RGMII 19 +#define S6_GMAC_HOST_INTMASK 0x144 +#define S6_GMAC_HOST_INTSTAT 0x148 +#define S6_GMAC_HOST_INT_TXBURSTOVER 3 +#define S6_GMAC_HOST_INT_TXPREWOVER 4 +#define S6_GMAC_HOST_INT_RXBURSTUNDER 5 +#define S6_GMAC_HOST_INT_RXPOSTRFULL 6 +#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7 +#define S6_GMAC_HOST_RXFIFOHWM 0x14C +#define S6_GMAC_HOST_CTRLFRAMXP 0x150 +#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n)) +#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n)) +#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n)) +#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n)) + +#define S6_GMAC_BURST_PREWR 0x1B0 +#define S6_GMAC_BURST_PREWR_LEN 0 +#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1) +#define S6_GMAC_BURST_PREWR_CFE 20 +#define S6_GMAC_BURST_PREWR_PPE 21 +#define S6_GMAC_BURST_PREWR_FCS 22 +#define S6_GMAC_BURST_PREWR_PAD 23 +#define S6_GMAC_BURST_POSTRD 0x1D0 +#define S6_GMAC_BURST_POSTRD_LEN 0 +#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1) +#define S6_GMAC_BURST_POSTRD_DROP 20 + + +/* data handling */ + +#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */ +#define S6_NUM_RX_SKB 16 +#define S6_MAX_FRLEN 1536 + +struct s6gmac { + u32 reg; + u32 tx_dma; + u32 rx_dma; + u32 io; + u8 tx_chan; + u8 rx_chan; + spinlock_t lock; + u8 tx_skb_i, tx_skb_o; + u8 rx_skb_i, rx_skb_o; + struct sk_buff *tx_skb[S6_NUM_TX_SKB]; + struct sk_buff *rx_skb[S6_NUM_RX_SKB]; + unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)]; + unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)]; + struct phy_device *phydev; + struct { + struct mii_bus *bus; + int irq[PHY_MAX_ADDR]; + } mii; + struct { + unsigned int mbit; + u8 giga; + u8 isup; + u8 full; + } link; +}; + +static void s6gmac_rx_fillfifo(struct s6gmac *pd) +{ + struct sk_buff *skb; + while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) + && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) + && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) { + pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb; + s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan, + pd->io, (u32)skb->data, S6_MAX_FRLEN); + } +} + +static void s6gmac_rx_interrupt(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + u32 pfx; + struct sk_buff *skb; + while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) > + s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) { + skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]; + pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD); + if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) { + dev_kfree_skb_irq(skb); + } else { + skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) + & S6_GMAC_BURST_POSTRD_LEN_MASK); + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_rx(skb); + } + } +} + +static void s6gmac_tx_interrupt(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) > + s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) { + dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); + } + if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) + netif_wake_queue(dev); +} + +struct s6gmac_statinf { + unsigned reg_size : 4; /* 0: unused */ + unsigned reg_off : 6; + unsigned net_index : 6; +}; + +#define S6_STATS_B (8 * sizeof(u32)) +#define S6_STATS_C(b, r, f) [b] = { \ + BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \ + BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \ + >= (1<<4)) + \ + r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \ + BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / 
sizeof(u32))) \ + >= ((1<<6)-1)) + \ + (r - S6_GMAC_STAT_REGS) / sizeof(u32), \ + BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \ + % sizeof(unsigned long)) + \ + BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \ + / sizeof(unsigned long)) >= (1<<6))) + \ + BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \ + != sizeof(unsigned long))) + \ + (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)}, + +static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { { + S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes) + S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets) + S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast) + S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors) + S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped) +}, { + S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes) + S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets) + S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors) + S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors) + S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions) + S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped) + S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors) + S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors) + S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors) + S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors) + S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors) +} }; + +static void s6gmac_stats_collect(struct s6gmac *pd, + const struct s6gmac_statinf *inf) +{ + int b; + for (b = 0; b < S6_STATS_B; b++) { + if (inf[b].reg_size) { + pd->stats[inf[b].net_index] += + readl(pd->reg + S6_GMAC_STAT_REGS + + sizeof(u32) * inf[b].reg_off); + } + } +} + +static void s6gmac_stats_carry(struct s6gmac *pd, + const struct s6gmac_statinf *inf, u32 mask) +{ + int b; + while (mask) { + b = fls(mask) - 1; + mask &= ~(1 << b); + pd->carry[inf[b].net_index] += (1 << inf[b].reg_size); + } +} + +static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry) +{ + int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) & + ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry)); + return r; +} + +static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry) +{ + u32 mask; + mask = s6gmac_stats_pending(pd, carry); + if (mask) { + writel(mask, pd->reg + S6_GMAC_STATCARRY(carry)); + s6gmac_stats_carry(pd, &statinf[carry][0], mask); + } +} + +static irqreturn_t s6gmac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct s6gmac *pd = netdev_priv(dev); + if (!dev) + return IRQ_NONE; + spin_lock(&pd->lock); + if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan)) + s6gmac_rx_interrupt(dev); + s6gmac_rx_fillfifo(pd); + if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan)) + s6gmac_tx_interrupt(dev); + s6gmac_stats_interrupt(pd, 0); + 
s6gmac_stats_interrupt(pd, 1); + spin_unlock(&pd->lock); + return IRQ_HANDLED; +} + +static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n, + u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi) +{ + writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n)); + writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n)); + writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n)); + writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n)); +} + +static inline void s6gmac_stop_device(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + writel(0, pd->reg + S6_GMAC_MACCONF1); +} + +static inline void s6gmac_init_device(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + int is_rgmii = !!(pd->phydev->supported + & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)); +#if 0 + writel(1 << S6_GMAC_MACCONF1_SYNCTX | + 1 << S6_GMAC_MACCONF1_SYNCRX | + 1 << S6_GMAC_MACCONF1_TXFLOWCTRL | + 1 << S6_GMAC_MACCONF1_RXFLOWCTRL | + 1 << S6_GMAC_MACCONF1_RESTXFUNC | + 1 << S6_GMAC_MACCONF1_RESRXFUNC | + 1 << S6_GMAC_MACCONF1_RESTXMACCTRL | + 1 << S6_GMAC_MACCONF1_RESRXMACCTRL, + pd->reg + S6_GMAC_MACCONF1); +#endif + writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1); + udelay(1000); + writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA, + pd->reg + S6_GMAC_MACCONF1); + writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES | + 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES, + pd->reg + S6_GMAC_HOST_PBLKCTRL); + writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | + S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | + 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | + 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | + is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, + pd->reg + S6_GMAC_HOST_PBLKCTRL); + writel(1 << S6_GMAC_MACCONF1_TXENA | + 1 << S6_GMAC_MACCONF1_RXENA | + (dev->flags & IFF_LOOPBACK ? 1 : 0) + << S6_GMAC_MACCONF1_LOOPBACK, + pd->reg + S6_GMAC_MACCONF1); + writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ? + dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN, + pd->reg + S6_GMAC_MACMAXFRAMELEN); + writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL | + 1 << S6_GMAC_MACCONF2_PADCRCENA | + 1 << S6_GMAC_MACCONF2_LENGTHFCHK | + (pd->link.giga ? 
+ S6_GMAC_MACCONF2_IFMODE_BYTE : + S6_GMAC_MACCONF2_IFMODE_NIBBLE) + << S6_GMAC_MACCONF2_IFMODE | + 7 << S6_GMAC_MACCONF2_PREAMBLELEN, + pd->reg + S6_GMAC_MACCONF2); + writel(0, pd->reg + S6_GMAC_MACSTATADDR1); + writel(0, pd->reg + S6_GMAC_MACSTATADDR2); + writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ | + 1 << S6_GMAC_FIFOCONF0_SRFENREQ | + 1 << S6_GMAC_FIFOCONF0_FRFENREQ | + 1 << S6_GMAC_FIFOCONF0_STFENREQ | + 1 << S6_GMAC_FIFOCONF0_FTFENREQ, + pd->reg + S6_GMAC_FIFOCONF0); + writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH | + 128 << S6_GMAC_FIFOCONF3_CFGHWMFT, + pd->reg + S6_GMAC_FIFOCONF3); + writel((S6_GMAC_FIFOCONF_RSV_MASK & ~( + 1 << S6_GMAC_FIFOCONF_RSV_RUNT | + 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | + 1 << S6_GMAC_FIFOCONF_RSV_OK | + 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | + 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | + 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | + 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | + 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) | + 1 << S6_GMAC_FIFOCONF5_DROPLT64 | + pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM | + 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE, + pd->reg + S6_GMAC_FIFOCONF5); + writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT | + 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | + 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | + 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | + 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | + 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | + 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED, + pd->reg + S6_GMAC_FIFOCONF4); + s6gmac_set_dstaddr(pd, 0, + 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF); + s6gmac_set_dstaddr(pd, 1, + dev->dev_addr[5] | + dev->dev_addr[4] << 8 | + dev->dev_addr[3] << 16 | + dev->dev_addr[2] << 24, + dev->dev_addr[1] | + dev->dev_addr[0] << 8, + 0xFFFFFFFF, 0x0000FFFF); + s6gmac_set_dstaddr(pd, 2, + 0x00000000, 0x00000100, 0x00000000, 0x00000100); + s6gmac_set_dstaddr(pd, 3, + 0x00000000, 0x00000000, 0x00000000, 0x00000000); + writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA | + 1 << S6_GMAC_HOST_PBLKCTRL_RXENA | + S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | + S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | + 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | + 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | + is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, + pd->reg + S6_GMAC_HOST_PBLKCTRL); +} + +static void s6mii_enable(struct s6gmac *pd) +{ + writel(readl(pd->reg + S6_GMAC_MACCONF1) & + ~(1 << S6_GMAC_MACCONF1_SOFTRES), + pd->reg + S6_GMAC_MACCONF1); + writel((readl(pd->reg + S6_GMAC_MACMIICONF) + & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL)) + | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL), + pd->reg + S6_GMAC_MACMIICONF); +} + +static int s6mii_busy(struct s6gmac *pd, int tmo) +{ + while (readl(pd->reg + S6_GMAC_MACMIIINDI)) { + if (--tmo == 0) + return -ETIME; + udelay(64); + } + return 0; +} + +static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct s6gmac *pd = bus->priv; + s6mii_enable(pd); + if (s6mii_busy(pd, 256)) + return -ETIME; + writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | + regnum << S6_GMAC_MACMIIADDR_REG, + pd->reg + S6_GMAC_MACMIIADDR); + writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD); + writel(0, pd->reg + S6_GMAC_MACMIICMD); + if (s6mii_busy(pd, 256)) + return -ETIME; + return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT); +} + +static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct s6gmac *pd = bus->priv; + s6mii_enable(pd); + if (s6mii_busy(pd, 256)) + return -ETIME; + writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | + regnum << S6_GMAC_MACMIIADDR_REG, + pd->reg + S6_GMAC_MACMIIADDR); + 
writel(value, pd->reg + S6_GMAC_MACMIICTRL); + if (s6mii_busy(pd, 256)) + return -ETIME; + return 0; +} + +static int s6mii_reset(struct mii_bus *bus) +{ + struct s6gmac *pd = bus->priv; + s6mii_enable(pd); + if (s6mii_busy(pd, PHY_INIT_TIMEOUT)) + return -ETIME; + return 0; +} + +static void s6gmac_set_rgmii_txclock(struct s6gmac *pd) +{ + u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL); + pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC); + switch (pd->link.mbit) { + case 10: + pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC; + break; + case 100: + pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC; + break; + case 1000: + pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC; + break; + default: + return; + } + writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL); +} + +static inline void s6gmac_linkisup(struct net_device *dev, int isup) +{ + struct s6gmac *pd = netdev_priv(dev); + struct phy_device *phydev = pd->phydev; + + pd->link.full = phydev->duplex; + pd->link.giga = (phydev->speed == 1000); + if (pd->link.mbit != phydev->speed) { + pd->link.mbit = phydev->speed; + s6gmac_set_rgmii_txclock(pd); + } + pd->link.isup = isup; + if (isup) + netif_carrier_on(dev); + phy_print_status(phydev); +} + +static void s6gmac_adjust_link(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + struct phy_device *phydev = pd->phydev; + if (pd->link.isup && + (!phydev->link || + (pd->link.mbit != phydev->speed) || + (pd->link.full != phydev->duplex))) { + pd->link.isup = 0; + netif_tx_disable(dev); + if (!phydev->link) { + netif_carrier_off(dev); + phy_print_status(phydev); + } + } + if (!pd->link.isup && phydev->link) { + if (pd->link.full != phydev->duplex) { + u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); + if (phydev->duplex) + maccfg |= 1 << S6_GMAC_MACCONF2_FULL; + else + maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL); + writel(maccfg, pd->reg + S6_GMAC_MACCONF2); + } + + if (pd->link.giga != (phydev->speed == 1000)) { + u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5); + u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); + maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK + << S6_GMAC_MACCONF2_IFMODE); + if (phydev->speed == 1000) { + fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM; + maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE + << S6_GMAC_MACCONF2_IFMODE; + } else { + fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM); + maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE + << S6_GMAC_MACCONF2_IFMODE; + } + writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5); + writel(maccfg, pd->reg + S6_GMAC_MACCONF2); + } + + if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) + netif_wake_queue(dev); + s6gmac_linkisup(dev, 1); + } +} + +static inline int s6gmac_phy_start(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + int i = 0; + struct phy_device *p = NULL; + while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) + i++; + p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, + PHY_INTERFACE_MODE_RGMII); + if (IS_ERR(p)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(p); + } + p->supported &= PHY_GBIT_FEATURES; + p->advertising = p->supported; + pd->phydev = p; + return 0; +} + +static inline void s6gmac_init_stats(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + u32 mask; + mask = 1 << S6_GMAC_STATCARRY1_RDRP | + 1 << S6_GMAC_STATCARRY1_RJBR | + 1 << S6_GMAC_STATCARRY1_RFRG | + 1 << S6_GMAC_STATCARRY1_ROVR | + 1 << S6_GMAC_STATCARRY1_RUND | + 1 << S6_GMAC_STATCARRY1_RCDE | + 1 << S6_GMAC_STATCARRY1_RFLR | + 
1 << S6_GMAC_STATCARRY1_RALN | + 1 << S6_GMAC_STATCARRY1_RMCA | + 1 << S6_GMAC_STATCARRY1_RFCS | + 1 << S6_GMAC_STATCARRY1_RPKT | + 1 << S6_GMAC_STATCARRY1_RBYT; + writel(mask, pd->reg + S6_GMAC_STATCARRY(0)); + writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0)); + mask = 1 << S6_GMAC_STATCARRY2_TDRP | + 1 << S6_GMAC_STATCARRY2_TNCL | + 1 << S6_GMAC_STATCARRY2_TXCL | + 1 << S6_GMAC_STATCARRY2_TEDF | + 1 << S6_GMAC_STATCARRY2_TPKT | + 1 << S6_GMAC_STATCARRY2_TBYT | + 1 << S6_GMAC_STATCARRY2_TFRG | + 1 << S6_GMAC_STATCARRY2_TUND | + 1 << S6_GMAC_STATCARRY2_TOVR | + 1 << S6_GMAC_STATCARRY2_TFCS | + 1 << S6_GMAC_STATCARRY2_TJBR; + writel(mask, pd->reg + S6_GMAC_STATCARRY(1)); + writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1)); +} + +static inline void s6gmac_init_dmac(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + s6dmac_disable_chan(pd->tx_dma, pd->tx_chan); + s6dmac_disable_chan(pd->rx_dma, pd->rx_chan); + s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX); + s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX); +} + +static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + unsigned long flags; + spin_lock_irqsave(&pd->lock, flags); + dev->trans_start = jiffies; + writel(skb->len << S6_GMAC_BURST_PREWR_LEN | + 0 << S6_GMAC_BURST_PREWR_CFE | + 1 << S6_GMAC_BURST_PREWR_PPE | + 1 << S6_GMAC_BURST_PREWR_FCS | + ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD, + pd->reg + S6_GMAC_BURST_PREWR); + s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan, + (u32)skb->data, pd->io, skb->len); + if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) + netif_stop_queue(dev); + if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) { + printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n", + pd->tx_skb_o, pd->tx_skb_i); + BUG(); + } + pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb; + spin_unlock_irqrestore(&pd->lock, flags); + return 0; +} + +static void s6gmac_tx_timeout(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + unsigned long flags; + spin_lock_irqsave(&pd->lock, flags); + s6gmac_tx_interrupt(dev); + spin_unlock_irqrestore(&pd->lock, flags); +} + +static int s6gmac_open(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + unsigned long flags; + phy_read_status(pd->phydev); + spin_lock_irqsave(&pd->lock, flags); + pd->link.mbit = 0; + s6gmac_linkisup(dev, pd->phydev->link); + s6gmac_init_device(dev); + s6gmac_init_stats(dev); + s6gmac_init_dmac(dev); + s6gmac_rx_fillfifo(pd); + s6dmac_enable_chan(pd->rx_dma, pd->rx_chan, + 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1); + s6dmac_enable_chan(pd->tx_dma, pd->tx_chan, + 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1); + writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER | + 0 << S6_GMAC_HOST_INT_TXPREWOVER | + 0 << S6_GMAC_HOST_INT_RXBURSTUNDER | + 0 << S6_GMAC_HOST_INT_RXPOSTRFULL | + 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER, + pd->reg + S6_GMAC_HOST_INTMASK); + spin_unlock_irqrestore(&pd->lock, flags); + phy_start(pd->phydev); + netif_start_queue(dev); + return 0; +} + +static int s6gmac_stop(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + unsigned long flags; + netif_stop_queue(dev); + phy_stop(pd->phydev); + spin_lock_irqsave(&pd->lock, flags); + s6gmac_init_dmac(dev); + s6gmac_stop_device(dev); + while (pd->tx_skb_i != pd->tx_skb_o) + dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); + while (pd->rx_skb_i != pd->rx_skb_o) + dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]); + spin_unlock_irqrestore(&pd->lock, 
flags); + return 0; +} + +static struct net_device_stats *s6gmac_stats(struct net_device *dev) +{ + struct s6gmac *pd = netdev_priv(dev); + struct net_device_stats *st = (struct net_device_stats *)&pd->stats; + int i; + do { + unsigned long flags; + spin_lock_irqsave(&pd->lock, flags); + for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++) + pd->stats[i] = + pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); + s6gmac_stats_collect(pd, &statinf[0][0]); + s6gmac_stats_collect(pd, &statinf[1][0]); + i = s6gmac_stats_pending(pd, 0) | + s6gmac_stats_pending(pd, 1); + spin_unlock_irqrestore(&pd->lock, flags); + } while (i); + st->rx_errors = st->rx_crc_errors + + st->rx_frame_errors + + st->rx_length_errors + + st->rx_missed_errors; + st->tx_errors += st->tx_aborted_errors; + return st; +} + +static int __devinit s6gmac_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct s6gmac *pd; + int res; + unsigned long i; + struct mii_bus *mb; + dev = alloc_etherdev(sizeof(*pd)); + if (!dev) { + printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n"); + return -ENOMEM; + } + dev->open = s6gmac_open; + dev->stop = s6gmac_stop; + dev->hard_start_xmit = s6gmac_tx; + dev->tx_timeout = s6gmac_tx_timeout; + dev->watchdog_timeo = HZ; + dev->get_stats = s6gmac_stats; + dev->irq = platform_get_irq(pdev, 0); + pd = netdev_priv(dev); + memset(pd, 0, sizeof(*pd)); + spin_lock_init(&pd->lock); + pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start; + i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start; + pd->tx_dma = DMA_MASK_DMAC(i); + pd->tx_chan = DMA_INDEX_CHNL(i); + i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start; + pd->rx_dma = DMA_MASK_DMAC(i); + pd->rx_chan = DMA_INDEX_CHNL(i); + pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; + res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev); + if (res) { + printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq); + goto errirq; + } + res = register_netdev(dev); + if (res) { + printk(KERN_ERR DRV_PRMT "error registering device %s\n", + dev->name); + goto errdev; + } + mb = mdiobus_alloc(); + if (!mb) { + printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); + goto errmii; + } + mb->name = "s6gmac_mii"; + mb->read = s6mii_read; + mb->write = s6mii_write; + mb->reset = s6mii_reset; + mb->priv = pd; + snprintf(mb->id, MII_BUS_ID_SIZE, "0"); + mb->phy_mask = ~(1 << 0); + mb->irq = &pd->mii.irq[0]; + for (i = 0; i < PHY_MAX_ADDR; i++) { + int n = platform_get_irq(pdev, i + 1); + if (n < 0) + n = PHY_POLL; + pd->mii.irq[i] = n; + } + mdiobus_register(mb); + pd->mii.bus = mb; + res = s6gmac_phy_start(dev); + if (res) + return res; + platform_set_drvdata(pdev, dev); + return 0; +errmii: + unregister_netdev(dev); +errdev: + free_irq(dev->irq, dev); +errirq: + free_netdev(dev); + return res; +} + +static int __devexit s6gmac_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + if (dev) { + struct s6gmac *pd = netdev_priv(dev); + mdiobus_unregister(pd->mii.bus); + unregister_netdev(dev); + free_irq(dev->irq, dev); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + } + return 0; +} + +static struct platform_driver s6gmac_driver = { + .probe = s6gmac_probe, + .remove = __devexit_p(s6gmac_remove), + .driver = { + .name = "s6gmac", + .owner = THIS_MODULE, + }, +}; + +static int __init s6gmac_init(void) +{ + printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n"); + return platform_driver_register(&s6gmac_driver); +} + + +static void __exit 
s6gmac_exit(void) +{ + platform_driver_unregister(&s6gmac_driver); +} + +module_init(s6gmac_init); +module_exit(s6gmac_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); +MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>"); diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 341882f959f..a2d82ddb3b4 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - u32 ioaddr, boguscnt = RX_RING_SIZE; - u32 intr_status = 0; + u32 ioaddr, intr_status = 0; ioaddr = ndev->base_addr; spin_lock(&mdp->lock); @@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) if (intr_status & cd->eesr_err_check) sh_eth_error(ndev, intr_status); - if (--boguscnt < 0) { - printk(KERN_WARNING - "%s: Too much work at interrupt, status=0x%4.4x.\n", - ndev->name, intr_status); - } - other_irq: spin_unlock(&mdp->lock); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7681d28c53d..daf961ab68b 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) if (likely(status >> 16 == (status & 0xffff))) { skb = sky2->rx_ring[sky2->rx_next].skb; skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = status & 0xffff; + skb->csum = le16_to_cpu(status); } else { printk(KERN_NOTICE PFX "%s: hardware receive " "checksum problem (status = %#x)\n", diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3717569828b..a906d399813 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -169,10 +169,12 @@ config USB_NET_CDCETHER The Linux-USB CDC Ethernet Gadget driver is an open implementation. This driver should work with at least the following devices: + * Dell Wireless 5530 HSPA * Ericsson PipeRider (all variants) + * Ericsson Mobile Broadband Module (all variants) * Motorola (DM100 and SB4100) * Broadcom Cable Modem (reference design) - * Toshiba PCX1100U + * Toshiba (PCX1100U and F3507g) * ... This driver creates an interface named "ethX", where X depends on diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 01fd528306e..4a6aff57940 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -533,6 +533,31 @@ static const struct usb_device_id products [] = { USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, +}, { + /* Ericsson F3507g ver. 
2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &cdc_info, +}, { + /* Ericsson F3607gw */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &cdc_info, +}, { + /* Ericsson F3307 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &cdc_info, +}, { + /* Toshiba F3507g */ + USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &cdc_info, +}, { + /* Dell F3507g */ + USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &cdc_info, }, { }, // END }; diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c index c66b9c324f5..ca39ace0b0e 100644 --- a/drivers/net/usb/cdc_subset.c +++ b/drivers/net/usb/cdc_subset.c @@ -307,9 +307,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x1286, 0x8001), // "blob" bootloader .driver_info = (unsigned long) &blob_info, }, { - // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config - // e.g. Gumstix, current OpenZaurus, ... - USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203), + // Linux Ethernet/RNDIS gadget, mostly on PXA, second config + // e.g. Gumstix, current OpenZaurus, ... or anything else + // that just enables this gadget option. + USB_DEVICE (0x0525, 0xa4a2), .driver_info = (unsigned long) &linuxdev_info, }, #endif diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 2138535f233..73acbd244aa 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus) pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; - pegasus->dr.wValue = 0; + pegasus->dr.wValue = cpu_to_le16(0); pegasus->dr.wIndex = cpu_to_le16(EthCtrl0); pegasus->dr.wLength = cpu_to_le16(3); pegasus->ctrl_urb->transfer_buffer_length = 3; @@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data) int i; __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; int ret; + __le16 le_data = cpu_to_le16(data); set_registers(pegasus, EpromOffset, 4, d); enable_eprom_write(pegasus); set_register(pegasus, EpromOffset, index); - set_registers(pegasus, EpromData, 2, &data); + set_registers(pegasus, EpromData, 2, &le_data); set_register(pegasus, EpromCtrl, EPROM_WRITE); for (i = 0; i < REG_TIMEOUT; i++) { @@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev) static inline void disable_net_traffic(pegasus_t * pegasus) { - int tmp = 0; + __le16 tmp = cpu_to_le16(0); - set_registers(pegasus, EthCtrl0, 2, &tmp); + set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); } static inline void get_interrupt_interval(pegasus_t * pegasus) { - __u8 data[2]; + u16 data; + u8 interval; - read_eprom_word(pegasus, 4, (__u16 *) data); + read_eprom_word(pegasus, 4, &data); + interval = data >> 8; if (pegasus->usb->speed != USB_SPEED_HIGH) { - if (data[1] < 0x80) { + if (interval < 0x80) { if (netif_msg_timer(pegasus)) dev_info(&pegasus->intf->dev, "intr interval " "changed from %ums to %ums\n", - data[1], 0x80); - data[1] = 0x80; + interval, 0x80); + interval = 0x80; + data = (data & 0x00FF) | ((u16)interval << 8); #ifdef 
PEGASUS_WRITE_EEPROM - write_eprom_word(pegasus, 4, *(__u16 *) data); + write_eprom_word(pegasus, 4, data); #endif } } - pegasus->intr_interval = data[1]; + pegasus->intr_interval = interval; } static void set_carrier(struct net_device *net) @@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev) /* Special quirk to keep the driver from handling the Belkin Bluetooth * dongle which happens to have the same ID. */ - if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) && + if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) && + (udd->idProduct == cpu_to_le16(0x0121)) && (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && (udd->bDeviceProtocol == 1)) return 1; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 87197dd9c78..1097c72e44d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -208,11 +208,14 @@ rx_drop: static struct net_device_stats *veth_get_stats(struct net_device *dev) { - struct veth_priv *priv = netdev_priv(dev); - struct net_device_stats *dev_stats = &dev->stats; - unsigned int cpu; + struct veth_priv *priv; + struct net_device_stats *dev_stats; + int cpu; struct veth_net_stats *stats; + priv = netdev_priv(dev); + dev_stats = &dev->stats; + dev_stats->rx_packets = 0; dev_stats->tx_packets = 0; dev_stats->rx_bytes = 0; @@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev) dev_stats->tx_dropped = 0; dev_stats->rx_dropped = 0; - if (priv->stats) - for_each_online_cpu(cpu) { - stats = per_cpu_ptr(priv->stats, cpu); + for_each_online_cpu(cpu) { + stats = per_cpu_ptr(priv->stats, cpu); - dev_stats->rx_packets += stats->rx_packets; - dev_stats->tx_packets += stats->tx_packets; - dev_stats->rx_bytes += stats->rx_bytes; - dev_stats->tx_bytes += stats->tx_bytes; - dev_stats->tx_dropped += stats->tx_dropped; - dev_stats->rx_dropped += stats->rx_dropped; - } + dev_stats->rx_packets += stats->rx_packets; + dev_stats->tx_packets += stats->tx_packets; + dev_stats->rx_bytes += stats->rx_bytes; + dev_stats->tx_bytes += stats->tx_bytes; + dev_stats->tx_dropped += stats->tx_dropped; + dev_stats->rx_dropped += stats->rx_dropped; + } return dev_stats; } @@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev) netif_carrier_off(dev); netif_carrier_off(priv->peer); - free_percpu(priv->stats); - priv->stats = NULL; return 0; } @@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev) return 0; } +static void veth_dev_free(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + free_percpu(priv->stats); + free_netdev(dev); +} + static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, @@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev) dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; - dev->destructor = free_netdev; + dev->destructor = veth_dev_free; } /* diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index b02f7adff5d..3ba35956327 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1847,7 +1847,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ */ if (tdinfo->skb_dma) { - pktlen = (skb->len > ETH_ZLEN ? 
: ETH_ZLEN); + pktlen = max_t(unsigned int, skb->len, ETH_ZLEN); for (i = 0; i < tdinfo->nskb_dma; i++) { #ifdef VELOCITY_ZERO_COPY_SUPPORT pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 55f7de09d13..ea045151f95 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -538,6 +538,7 @@ ath5k_pci_probe(struct pci_dev *pdev, sc->iobase = mem; /* So we can unmap it on detach */ sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ sc->opmode = NL80211_IFTYPE_STATION; + sc->bintval = 1000; mutex_init(&sc->lock); spin_lock_init(&sc->rxbuflock); spin_lock_init(&sc->txbuflock); @@ -686,6 +687,13 @@ ath5k_pci_resume(struct pci_dev *pdev) if (err) return err; + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state + */ + pci_write_config_byte(pdev, 0x41, 0); + err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); if (err) { ATH5K_ERR(sc, "request_irq failed\n"); @@ -2748,9 +2756,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw, goto end; } - /* Set to a reasonable value. Note that this will - * be set to mac80211's value at ath5k_config(). */ - sc->bintval = 1000; ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); ret = 0; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9f49a3251d4..66a6c1f5022 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1196,8 +1196,8 @@ void ath_radio_disable(struct ath_softc *sc) ath9k_hw_phy_disable(ah); ath9k_hw_configpcipowersave(ah, 1); - ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); ath9k_ps_restore(sc); + ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); } /*******************/ diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index ccdf20a2e9b..170c5b32e49 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -87,6 +87,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct ath_softc *sc; struct ieee80211_hw *hw; u8 csz; + u32 val; int ret = 0; struct ath_hw *ah; @@ -133,6 +134,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); + /* + * Disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. 
+ */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + ret = pci_request_region(pdev, 0, "ath9k"); if (ret) { dev_err(&pdev->dev, "PCI memory region reserve error\n"); @@ -239,12 +248,21 @@ static int ath_pci_resume(struct pci_dev *pdev) struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; + u32 val; int err; err = pci_enable_device(pdev); if (err) return err; pci_restore_state(pdev); + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state + */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); /* Enable LED */ ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index f99f3a76df3..cece1c4c6bd 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -539,11 +539,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) if (ath_beacon_dtim_pending_cab(skb)) { /* * Remain awake waiting for buffered broadcast/multicast - * frames. + * frames. If the last broadcast/multicast frame is not + * received properly, the next beacon frame will work as + * a backup trigger for returning into NETWORK SLEEP state, + * so we are waiting for it as well. */ DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " "buffered broadcast/multicast frame(s)\n"); - sc->sc_flags |= SC_OP_WAIT_FOR_CAB; + sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; return; } diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h index 635c16ee618..77c339f8516 100644 --- a/drivers/net/wireless/iwmc3200wifi/iwm.h +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h @@ -288,6 +288,7 @@ struct iwm_priv { u8 *eeprom; struct timer_list watchdog; struct work_struct reset_worker; + struct mutex mutex; struct rfkill *rfkill; char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); @@ -315,8 +316,11 @@ extern const struct iw_handler_def iwm_iw_handler_def; void *iwm_if_alloc(int sizeof_bus, struct device *dev, struct iwm_if_ops *if_ops); void iwm_if_free(struct iwm_priv *iwm); +int iwm_if_add(struct iwm_priv *iwm); +void iwm_if_remove(struct iwm_priv *iwm); int iwm_mode_to_nl80211_iftype(int mode); int iwm_priv_init(struct iwm_priv *iwm); +void iwm_priv_deinit(struct iwm_priv *iwm); void iwm_reset(struct iwm_priv *iwm); void iwm_tx_credit_init_pools(struct iwm_priv *iwm, struct iwm_umac_notif_alive *alive); diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c index 6a2640f16b6..8be206d5822 100644 --- a/drivers/net/wireless/iwmc3200wifi/main.c +++ b/drivers/net/wireless/iwmc3200wifi/main.c @@ -112,6 +112,9 @@ static void iwm_statistics_request(struct work_struct *work) iwm_send_umac_stats_req(iwm, 0); } +int __iwm_up(struct iwm_priv *iwm); +int __iwm_down(struct iwm_priv *iwm); + static void iwm_reset_worker(struct work_struct *work) { struct iwm_priv *iwm; @@ -120,6 +123,19 @@ static void iwm_reset_worker(struct work_struct *work) iwm = container_of(work, struct iwm_priv, reset_worker); + /* + * XXX: The iwm->mutex is introduced purely for this reset work, + * because the other users for iwm_up and iwm_down are only netdev + * ndo_open and ndo_stop which are 
already protected by rtnl. + * Please remove iwm->mutex together if iwm_reset_worker() is not + * required in the future. + */ + if (!mutex_trylock(&iwm->mutex)) { + IWM_WARN(iwm, "We are in the middle of interface bringing " + "UP/DOWN. Skip driver resetting.\n"); + return; + } + if (iwm->umac_profile_active) { profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); if (profile) @@ -128,10 +144,10 @@ static void iwm_reset_worker(struct work_struct *work) IWM_ERR(iwm, "Couldn't alloc memory for profile\n"); } - iwm_down(iwm); + __iwm_down(iwm); while (retry++ < 3) { - ret = iwm_up(iwm); + ret = __iwm_up(iwm); if (!ret) break; @@ -142,7 +158,7 @@ static void iwm_reset_worker(struct work_struct *work) IWM_WARN(iwm, "iwm_up() failed: %d\n", ret); kfree(profile); - return; + goto out; } if (profile) { @@ -151,6 +167,9 @@ static void iwm_reset_worker(struct work_struct *work) iwm_send_mlme_profile(iwm); kfree(profile); } + + out: + mutex_unlock(&iwm->mutex); } static void iwm_watchdog(unsigned long data) @@ -215,10 +234,21 @@ int iwm_priv_init(struct iwm_priv *iwm) init_timer(&iwm->watchdog); iwm->watchdog.function = iwm_watchdog; iwm->watchdog.data = (unsigned long)iwm; + mutex_init(&iwm->mutex); return 0; } +void iwm_priv_deinit(struct iwm_priv *iwm) +{ + int i; + + for (i = 0; i < IWM_TX_QUEUES; i++) + destroy_workqueue(iwm->txq[i].wq); + + destroy_workqueue(iwm->rx_wq); +} + /* * We reset all the structures, and we reset the UMAC. * After calling this routine, you're expected to reload @@ -466,7 +496,7 @@ void iwm_link_off(struct iwm_priv *iwm) iwm_rx_free(iwm); - cancel_delayed_work(&iwm->stats_request); + cancel_delayed_work_sync(&iwm->stats_request); memset(wstats, 0, sizeof(struct iw_statistics)); wstats->qual.updated = IW_QUAL_ALL_INVALID; @@ -511,7 +541,7 @@ static int iwm_channels_init(struct iwm_priv *iwm) return 0; } -int iwm_up(struct iwm_priv *iwm) +int __iwm_up(struct iwm_priv *iwm) { int ret; struct iwm_notif *notif_reboot, *notif_ack = NULL; @@ -647,7 +677,18 @@ int iwm_up(struct iwm_priv *iwm) return -EIO; } -int iwm_down(struct iwm_priv *iwm) +int iwm_up(struct iwm_priv *iwm) +{ + int ret; + + mutex_lock(&iwm->mutex); + ret = __iwm_up(iwm); + mutex_unlock(&iwm->mutex); + + return ret; +} + +int __iwm_down(struct iwm_priv *iwm) { int ret; @@ -678,3 +719,14 @@ int iwm_down(struct iwm_priv *iwm) return 0; } + +int iwm_down(struct iwm_priv *iwm) +{ + int ret; + + mutex_lock(&iwm->mutex); + ret = __iwm_down(iwm); + mutex_unlock(&iwm->mutex); + + return ret; +} diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index 68e2c3b6c7a..aaa20c6885c 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c @@ -114,32 +114,31 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, iwm = wdev_to_iwm(wdev); iwm->bus_ops = if_ops; iwm->wdev = wdev; - iwm_priv_init(iwm); + + ret = iwm_priv_init(iwm); + if (ret) { + dev_err(dev, "failed to init iwm_priv\n"); + goto out_wdev; + } + wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode); - ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, - IWM_TX_QUEUES); + ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); if (!ndev) { dev_err(dev, "no memory for network device instance\n"); - goto out_wdev; + goto out_priv; } ndev->netdev_ops = &iwm_netdev_ops; ndev->wireless_handlers = &iwm_iw_handler_def; ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); - ret = register_netdev(ndev); - if (ret < 0) { - 
dev_err(dev, "Failed to register netdev: %d\n", ret); - goto out_ndev; - } - wdev->netdev = ndev; return iwm; - out_ndev: - free_netdev(ndev); + out_priv: + iwm_priv_deinit(iwm); out_wdev: iwm_wdev_free(iwm); @@ -148,15 +147,29 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, void iwm_if_free(struct iwm_priv *iwm) { - int i; - if (!iwm_to_ndev(iwm)) return; - unregister_netdev(iwm_to_ndev(iwm)); free_netdev(iwm_to_ndev(iwm)); iwm_wdev_free(iwm); - destroy_workqueue(iwm->rx_wq); - for (i = 0; i < IWM_TX_QUEUES; i++) - destroy_workqueue(iwm->txq[i].wq); + iwm_priv_deinit(iwm); +} + +int iwm_if_add(struct iwm_priv *iwm) +{ + struct net_device *ndev = iwm_to_ndev(iwm); + int ret; + + ret = register_netdev(ndev); + if (ret < 0) { + dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret); + return ret; + } + + return 0; +} + +void iwm_if_remove(struct iwm_priv *iwm) +{ + unregister_netdev(iwm_to_ndev(iwm)); } diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c index b54da677b37..916681837fd 100644 --- a/drivers/net/wireless/iwmc3200wifi/sdio.c +++ b/drivers/net/wireless/iwmc3200wifi/sdio.c @@ -454,10 +454,18 @@ static int iwm_sdio_probe(struct sdio_func *func, INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker); + ret = iwm_if_add(iwm); + if (ret) { + dev_err(dev, "add SDIO interface failed\n"); + goto destroy_wq; + } + dev_info(dev, "IWM SDIO probe\n"); return 0; + destroy_wq: + destroy_workqueue(hw->isr_wq); debugfs_exit: iwm_debugfs_exit(iwm); if_free: @@ -471,9 +479,10 @@ static void iwm_sdio_remove(struct sdio_func *func) struct iwm_priv *iwm = hw_to_iwm(hw); struct device *dev = &func->dev; + iwm_if_remove(iwm); + destroy_workqueue(hw->isr_wq); iwm_debugfs_exit(iwm); iwm_if_free(iwm); - destroy_workqueue(hw->isr_wq); sdio_set_drvdata(func, NULL); diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index f0e5e943f6e..14a19baff21 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -67,6 +67,7 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, |
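
Editor's note: the sky2 and pegasus hunks above replace raw host-order integers with le16_to_cpu()/cpu_to_le16() conversions and __le16 storage. A minimal sketch of that convention, using a hypothetical descriptor (demo_desc and the field names are made up for illustration, not taken from either driver): convert exactly once, at the hardware boundary, and keep device-endian data in __le16 fields so sparse can catch mistakes.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical descriptor whose fields the device reads as little-endian. */
struct demo_desc {
	__le16 ctrl;
	__le16 len;
};

static void demo_fill_desc(struct demo_desc *d, u16 ctrl, u16 len)
{
	/* Convert from CPU order exactly once, when handing data to hardware. */
	d->ctrl = cpu_to_le16(ctrl);
	d->len = cpu_to_le16(len);
}

static u16 demo_read_len(const struct demo_desc *d)
{
	/* Convert back to CPU order before doing arithmetic on the value. */
	return le16_to_cpu(d->len);
}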
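
Editor's note: the cdc_ether and zd1211rw hunks above are pure ID-table additions. For reference, a sketch of what one such class/subclass/protocol match entry looks like on its own; the vendor and product IDs below (0x1234/0x5678) are placeholders, not values from the patch.

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Placeholder IDs: match one specific interface (by class, subclass and
 * protocol) on one specific device, as the cdc_ether entries above do. */
static const struct usb_device_id demo_ids[] = {
	{
		USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, USB_CLASS_COMM,
			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
		.driver_info = 0,
	},
	{ },		/* END */
};
MODULE_DEVICE_TABLE(usb, demo_ids);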
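
Editor's note: the veth hunks above stop freeing the per-cpu statistics in ndo_stop and free them from a custom destructor instead, so the counters survive ifdown/ifup and are released exactly once when the device itself is destroyed. A sketch of that lifetime split, assuming a hypothetical demo_priv with one per-cpu counter allocated at device init:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

struct demo_stats {
	unsigned long rx_packets;
};

struct demo_priv {
	struct demo_stats *stats;	/* per-cpu counters */
};

static int demo_dev_init(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	/* Allocated once, when the device is created. */
	priv->stats = alloc_percpu(struct demo_stats);
	return priv->stats ? 0 : -ENOMEM;
}

static void demo_dev_free(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	/* Freed only when the device goes away, not on every close(). */
	free_percpu(priv->stats);
	free_netdev(dev);
}

static void demo_setup(struct net_device *dev)
{
	dev->destructor = demo_dev_free;	/* as veth_setup() now does */
}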
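
Editor's note: the ath5k and ath9k resume paths above re-disable the RETRY_TIMEOUT register (offset 0x41) because suspend/resume resets PCI configuration space; ath9k performs the same dword read-modify-write in both probe and resume. A sketch of how that fix-up could live in one helper; the helper name is hypothetical, not a function in the patch.

#include <linux/pci.h>

/*
 * Clear the RETRY_TIMEOUT byte (offset 0x41, i.e. bits 15:8 of the dword
 * at 0x40) so PCI Tx retries do not interfere with the C3 CPU state.
 * Suspend/resume resets the configuration space, so resume must call
 * this again after probe.
 */
static void demo_quiet_retry_timeout(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, 0x40, &val);
	if (val & 0x0000ff00)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
}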
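
Editor's note: the iwmc3200wifi hunks above serialize iwm_up()/iwm_down() against the reset worker with a mutex, and the worker uses mutex_trylock() so it simply backs off if ndo_open/ndo_stop already holds the lock. A stripped-down sketch of that pattern, with hypothetical demo_* names standing in for the driver's own routines:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct mutex mutex;		/* serializes up/down against reset */
	struct work_struct reset_worker;
};

static void demo_down(struct demo_priv *p) { /* ... */ }
static int demo_up(struct demo_priv *p) { return 0; /* ... */ }

static void demo_reset_worker(struct work_struct *work)
{
	struct demo_priv *p = container_of(work, struct demo_priv,
					   reset_worker);

	/* ndo_open/ndo_stop hold the mutex; if one is running, skip the reset. */
	if (!mutex_trylock(&p->mutex))
		return;

	demo_down(p);
	demo_up(p);

	mutex_unlock(&p->mutex);
}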
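
Editor's note: the iwmc3200wifi netdev/sdio hunks above move register_netdev() out of the allocation path into a separate iwm_if_add(), which the SDIO probe calls only after its workqueues exist, so the net device cannot be opened before the bus glue is ready. A generic sketch of that probe ordering, with demo_* as hypothetical names:

#include <linux/device.h>
#include <linux/netdevice.h>

static int demo_setup_bus(struct device *dev) { return 0; /* wq, irq, ... */ }
static void demo_teardown_bus(struct device *dev) { /* ... */ }

/* Register the net device last, once everything ndo_open may touch is
 * ready, and unwind in reverse order if registration fails. */
static int demo_probe(struct device *dev, struct net_device *ndev)
{
	int ret;

	ret = demo_setup_bus(dev);
	if (ret)
		return ret;

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "Failed to register netdev: %d\n", ret);
		demo_teardown_bus(dev);
		return ret;
	}

	return 0;
}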