Diffstat (limited to 'drivers/net/bna/bna_txrx.c')
-rw-r--r-- | drivers/net/bna/bna_txrx.c | 4185 |
1 file changed, 0 insertions, 4185 deletions
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
deleted file mode 100644
index f0983c83244..00000000000
--- a/drivers/net/bna/bna_txrx.c
+++ /dev/null
@@ -1,4185 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#include "bna.h"
-#include "bfa_cs.h"
-#include "bfi.h"
-
-/**
- * IB
- */
-#define bna_ib_find_free_ibidx(_mask, _pos)\
-do {\
-	(_pos) = 0;\
-	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
-		((1 << (_pos)) & (_mask)))\
-		(_pos)++;\
-} while (0)
-
-#define bna_ib_count_ibidx(_mask, _count)\
-do {\
-	int pos = 0;\
-	(_count) = 0;\
-	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
-		if ((1 << pos) & (_mask))\
-			(_count) = pos + 1;\
-		pos++;\
-	} \
-} while (0)
-
-#define bna_ib_select_segpool(_count, _q_idx)\
-do {\
-	int i;\
-	(_q_idx) = -1;\
-	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
-		if ((_count <= ibidx_pool[i].pool_entry_size)) {\
-			(_q_idx) = i;\
-			break;\
-		} \
-	} \
-} while (0)
-
-struct bna_ibidx_pool {
-	int	pool_size;
-	int	pool_entry_size;
-};
-init_ibidx_pool(ibidx_pool);
-
-static struct bna_intr *
-bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
-		int vector)
-{
-	struct bna_intr *intr;
-	struct list_head *qe;
-
-	list_for_each(qe, &ib_mod->intr_active_q) {
-		intr = (struct bna_intr *)qe;
-
-		if ((intr->intr_type == intr_type) &&
-			(intr->vector == vector)) {
-			intr->ref_count++;
-			return intr;
-		}
-	}
-
-	if (list_empty(&ib_mod->intr_free_q))
-		return NULL;
-
-	bfa_q_deq(&ib_mod->intr_free_q, &intr);
-	bfa_q_qe_init(&intr->qe);
-
-	intr->ref_count = 1;
-	intr->intr_type = intr_type;
-	intr->vector = vector;
-
-	list_add_tail(&intr->qe, &ib_mod->intr_active_q);
-
-	return intr;
-}
-
-static void
-bna_intr_put(struct bna_ib_mod *ib_mod,
-		struct bna_intr *intr)
-{
-	intr->ref_count--;
-
-	if (intr->ref_count == 0) {
-		intr->ib = NULL;
-		list_del(&intr->qe);
-		bfa_q_qe_init(&intr->qe);
-		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
-	}
-}
-
-void
-bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
-		struct bna_res_info *res_info)
-{
-	int i;
-	int j;
-	int count;
-	u8 offset;
-	struct bna_doorbell_qset *qset;
-	unsigned long off;
-
-	ib_mod->bna = bna;
-
-	ib_mod->ib = (struct bna_ib *)
-		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
-	ib_mod->intr = (struct bna_intr *)
-		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
-	ib_mod->idx_seg = (struct bna_ibidx_seg *)
-		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
-
-	INIT_LIST_HEAD(&ib_mod->ib_free_q);
-	INIT_LIST_HEAD(&ib_mod->intr_free_q);
-	INIT_LIST_HEAD(&ib_mod->intr_active_q);
-
-	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
-		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
-
-	for (i = 0; i < BFI_MAX_IB; i++) {
-		ib_mod->ib[i].ib_id = i;
-
-		ib_mod->ib[i].ib_seg_host_addr_kva =
-		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
-		ib_mod->ib[i].ib_seg_host_addr.lsb =
-		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
-		ib_mod->ib[i].ib_seg_host_addr.msb =
-		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
-
-		qset = (struct bna_doorbell_qset *)0;
-		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
-					* (0x20 >> 2)]);
-		ib_mod->ib[i].door_bell.doorbell_addr = off +
-			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
-
-		bfa_q_qe_init(&ib_mod->ib[i].qe);
-		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
-
-		bfa_q_qe_init(&ib_mod->intr[i].qe);
-		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
-	}
-
-	count = 0;
-	offset = 0;
-	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
-		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
-			bfa_q_qe_init(&ib_mod->idx_seg[count]);
-			ib_mod->idx_seg[count].ib_seg_size =
-					ibidx_pool[i].pool_entry_size;
-			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
-			list_add_tail(&ib_mod->idx_seg[count].qe,
-				&ib_mod->ibidx_seg_pool[i]);
-			count++;
-			offset += ibidx_pool[i].pool_entry_size;
-		}
-	}
-}
-
-void
-bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
-{
-	int i;
-	int j;
-	struct list_head *qe;
-
-	i = 0;
-	list_for_each(qe, &ib_mod->ib_free_q)
-		i++;
-
-	i = 0;
-	list_for_each(qe, &ib_mod->intr_free_q)
-		i++;
-
-	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
-		j = 0;
-		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
-			j++;
-	}
-
-	ib_mod->bna = NULL;
-}
-
-static struct bna_ib *
-bna_ib_get(struct bna_ib_mod *ib_mod,
-		enum bna_intr_type intr_type,
-		int vector)
-{
-	struct bna_ib *ib;
-	struct bna_intr *intr;
-
-	if (intr_type == BNA_INTR_T_INTX)
-		vector = (1 << vector);
-
-	intr = bna_intr_get(ib_mod, intr_type, vector);
-	if (intr == NULL)
-		return NULL;
-
-	if (intr->ib) {
-		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
-			bna_intr_put(ib_mod, intr);
-			return NULL;
-		}
-		intr->ib->ref_count++;
-		return intr->ib;
-	}
-
-	if (list_empty(&ib_mod->ib_free_q)) {
-		bna_intr_put(ib_mod, intr);
-		return NULL;
-	}
-
-	bfa_q_deq(&ib_mod->ib_free_q, &ib);
-	bfa_q_qe_init(&ib->qe);
-
-	ib->ref_count = 1;
-	ib->start_count = 0;
-	ib->idx_mask = 0;
-
-	ib->intr = intr;
-	ib->idx_seg = NULL;
-	intr->ib = ib;
-
-	ib->bna = ib_mod->bna;
-
-	return ib;
-}
-
-static void
-bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
-{
-	bna_intr_put(ib_mod, ib->intr);
-
-	ib->ref_count--;
-
-	if (ib->ref_count == 0) {
-		ib->intr = NULL;
-		ib->bna = NULL;
-		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
-	}
-}
-
-/* Returns index offset - starting from 0 */
-static int
-bna_ib_reserve_idx(struct bna_ib *ib)
-{
-	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
-	struct bna_ibidx_seg *idx_seg;
-	int idx;
-	int num_idx;
-	int q_idx;
-
-	/* Find the first free index position */
-	bna_ib_find_free_ibidx(ib->idx_mask, idx);
-	if (idx == BFI_IBIDX_MAX_SEGSIZE)
-		return -1;
-
-	/*
-	 * Calculate the total number of indexes held by this IB,
-	 * including the index newly reserved above.
-	 */
-	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
-
-	/* See if there is a free space in the index segment held by this IB */
-	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
-		ib->idx_mask |= (1 << idx);
-		return idx;
-	}
-
-	if (ib->start_count)
-		return -1;
-
-	/* Allocate a new segment */
-	bna_ib_select_segpool(num_idx, q_idx);
-	while (1) {
-		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
-			return -1;
-		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
-			break;
-		q_idx++;
-	}
-	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
-	bfa_q_qe_init(&idx_seg->qe);
-
-	/* Free the old segment */
-	if (ib->idx_seg) {
-		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
-		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
-	}
-
-	ib->idx_seg = idx_seg;
-
-	ib->idx_mask |= (1 << idx);
-
-	return idx;
-}
-
-static void
-bna_ib_release_idx(struct bna_ib *ib, int idx)
-{
-	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
-	struct bna_ibidx_seg *idx_seg;
-	int num_idx;
-	int cur_q_idx;
-	int new_q_idx;
-
-	ib->idx_mask &= ~(1 << idx);
-
-	if (ib->start_count)
-		return;
-
-	bna_ib_count_ibidx(ib->idx_mask, num_idx);
-
-	/*
-	 * Free the segment, if there are no more indexes in the segment
-	 * held by this IB
-	 */
-	if (!num_idx) {
-		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
-		list_add_tail(&ib->idx_seg->qe,
-			&ib_mod->ibidx_seg_pool[cur_q_idx]);
-		ib->idx_seg = NULL;
-		return;
-	}
-
-	/* See if we can move to a smaller segment */
-	bna_ib_select_segpool(num_idx, new_q_idx);
-	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
-	while (new_q_idx < cur_q_idx) {
-		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
-			break;
-		new_q_idx++;
-	}
-	if (new_q_idx < cur_q_idx) {
-		/* Select the new smaller segment */
-		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
-		bfa_q_qe_init(&idx_seg->qe);
-		/* Free the old segment */
-		list_add_tail(&ib->idx_seg->qe,
-			&ib_mod->ibidx_seg_pool[cur_q_idx]);
-		ib->idx_seg = idx_seg;
-	}
-}
-
-static int
-bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
-{
-	if (ib->start_count)
-		return -1;
-
-	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
-	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
-	ib->ib_config.interpkt_count = ib_config->interpkt_count;
-	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
-
-	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
-	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
-		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
-
-	return 0;
-}
-
-static void
-bna_ib_start(struct bna_ib *ib)
-{
-	struct bna_ib_blk_mem ib_cfg;
-	struct bna_ib_blk_mem *ib_mem;
-	u32 pg_num;
-	u32 intx_mask;
-	int i;
-	void __iomem *base_addr;
-	unsigned long off;
-
-	ib->start_count++;
-
-	if (ib->start_count > 1)
-		return;
-
-	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
-	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
-
-	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
-			ib->ib_config.coalescing_timeo << 16) |
-			((u32)ib->ib_config.ctrl_flags << 8) |
-			(ib->intr->vector));
-	ib_cfg.ipkt_n_ent_n_idxof =
-			((u32)
-			(ib->ib_config.interpkt_timeo & 0xf) << 16) |
-			((u32)ib->idx_seg->ib_seg_size << 8) |
-			(ib->idx_seg->ib_idx_tbl_offset);
-	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
-			ib->ib_config.interpkt_count << 24);
-
-	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
-			HQM_IB_RAM_BASE_OFFSET);
-	writel(pg_num, ib->bna->regs.page_addr);
-
-	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
-			HQM_IB_RAM_BASE_OFFSET);
-
-	ib_mem = (struct bna_ib_blk_mem *)0;
-	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
-	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
-
-	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
-	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
-
-	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
-	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
-
-	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
-	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
-
-	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
-	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
-
-	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
-			(u32)ib->ib_config.coalescing_timeo, 0);
-
-	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
-			HQM_INDX_TBL_RAM_BASE_OFFSET);
-	writel(pg_num, ib->bna->regs.page_addr);
-
-	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
-			HQM_INDX_TBL_RAM_BASE_OFFSET);
-	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
-		off = (unsigned long)
-		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
-		writel(0, base_addr + off);
-	}
-
-	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
-		bna_intx_disable(ib->bna, intx_mask);
-		intx_mask &= ~(ib->intr->vector);
-		bna_intx_enable(ib->bna, intx_mask);
-	}
-}
-
-static void
-bna_ib_stop(struct bna_ib *ib)
-{
-	u32 intx_mask;
-
-	ib->start_count--;
-
-	if (ib->start_count == 0) {
-		writel(BNA_DOORBELL_IB_INT_DISABLE,
-				ib->door_bell.doorbell_addr);
-		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
-			bna_intx_disable(ib->bna, intx_mask);
-			intx_mask |= (ib->intr->vector);
-			bna_intx_enable(ib->bna, intx_mask);
-		}
-	}
-}
-
-static void
-bna_ib_fail(struct bna_ib *ib)
-{
-	ib->start_count = 0;
-}
-
-/**
- * RXF
- */
-static void rxf_enable(struct bna_rxf *rxf);
-static void rxf_disable(struct bna_rxf *rxf);
-static void __rxf_config_set(struct bna_rxf *rxf);
-static void __rxf_rit_set(struct bna_rxf *rxf);
-static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
-static int rxf_process_packet_filter(struct bna_rxf *rxf);
-static int rxf_clear_packet_filter(struct bna_rxf *rxf);
-static void rxf_reset_packet_filter(struct bna_rxf *rxf);
-static void rxf_cb_enabled(void *arg, int status);
-static void rxf_cb_disabled(void *arg, int status);
-static void bna_rxf_cb_stats_cleared(void *arg, int status);
-static void __rxf_enable(struct bna_rxf *rxf);
-static void __rxf_disable(struct bna_rxf *rxf);
-
-bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
-		enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
-		enum bna_rxf_event);
-
-static struct bfa_sm_table rxf_sm_table[] = {
-	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
-	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
-	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
-	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
-	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
-	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
-	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
-	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
-	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
-};
-
-static void
-bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
-{
-	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
-}
-
-static void
-bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_START:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
-		break;
-
-	case RXF_E_STOP:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_FAIL:
-		/* No-op */
-		break;
-
-	case RXF_E_CAM_FLTR_MOD:
-		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
-		break;
-
-	case RXF_E_STARTED:
-	case RXF_E_STOPPED:
-	case RXF_E_CAM_FLTR_RESP:
-		/**
-		 * These events are received due to flushing of mbox
-		 * when device fails
-		 */
-		/* No-op */
-		break;
-
-	case RXF_E_PAUSE:
-		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
-		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
-		break;
-
-	case RXF_E_RESUME:
-		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
-		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
-{
-	__rxf_config_set(rxf);
-	__rxf_rit_set(rxf);
-	rxf_enable(rxf);
-}
-
-static void
-bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_STOP:
-		/**
-		 * STOP is originated from bnad. When this happens,
-		 * it can not be waiting for filter update
-		 */
-		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
-		break;
-
-	case RXF_E_FAIL:
-		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
-		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_CAM_FLTR_MOD:
-		/* No-op */
-		break;
-
-	case RXF_E_STARTED:
-		/**
-		 * Force rxf_process_filter() to go through initial
-		 * config
-		 */
-		if ((rxf->ucast_active_mac != NULL) &&
-			(rxf->ucast_pending_set == 0))
-			rxf->ucast_pending_set = 1;
-
-		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
-			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-
-		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
-
-		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
-		break;
-
-	case RXF_E_PAUSE:
-	case RXF_E_RESUME:
-		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
-{
-	if (!rxf_process_packet_filter(rxf)) {
-		/* No more pending CAM entries to update */
-		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
-	}
-}
-
-static void
-bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_STOP:
-		/**
-		 * STOP is originated from bnad. When this happens,
-		 * it can not be waiting for filter update
-		 */
-		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
-		break;
-
-	case RXF_E_FAIL:
-		rxf_reset_packet_filter(rxf);
-		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
-		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_CAM_FLTR_MOD:
-		/* No-op */
-		break;
-
-	case RXF_E_CAM_FLTR_RESP:
-		if (!rxf_process_packet_filter(rxf)) {
-			/* No more pending CAM entries to update */
-			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
-			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
-		}
-		break;
-
-	case RXF_E_PAUSE:
-	case RXF_E_RESUME:
-		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_started_entry(struct bna_rxf *rxf)
-{
-	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
-
-	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
-		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
-			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
-		else
-			bfa_fsm_send_event(rxf, RXF_E_RESUME);
-	}
-
-}
-
-static void
-bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_STOP:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
-		/* Hack to get FSM start clearing CAM entries */
-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
-		break;
-
-	case RXF_E_FAIL:
-		rxf_reset_packet_filter(rxf);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_CAM_FLTR_MOD:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
-		break;
-
-	case RXF_E_PAUSE:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
-		break;
-
-	case RXF_E_RESUME:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
-{
-	/**
-	 * Note: Do not add rxf_clear_packet_filter here.
-	 * It will overstep mbox when this transition happens:
-	 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
-	 */
-}
-
-static void
-bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_FAIL:
-		/**
-		 * FSM was in the process of stopping, initiated by
-		 * bnad. When this happens, no one can be waiting for
-		 * start or filter update
-		 */
-		rxf_reset_packet_filter(rxf);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_CAM_FLTR_RESP:
-		if (!rxf_clear_packet_filter(rxf)) {
-			/* No more pending CAM entries to clear */
-			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
-			rxf_disable(rxf);
-		}
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
-{
-	/**
-	 * NOTE: Do not add rxf_disable here.
-	 * It will overstep mbox when this transition happens:
-	 * start_wait -> stop_wait on RXF_E_STOP event
-	 */
-}
-
-static void
-bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_FAIL:
-		/**
-		 * FSM was in the process of stopping, initiated by
-		 * bnad. When this happens, no one can be waiting for
-		 * start or filter update
-		 */
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_STARTED:
-		/**
-		 * This event is received due to abrupt transition from
-		 * bna_rxf_sm_start_wait state on receiving
-		 * RXF_E_STOP event
-		 */
-		rxf_disable(rxf);
-		break;
-
-	case RXF_E_STOPPED:
-		/**
-		 * FSM was in the process of stopping, initiated by
-		 * bnad. When this happens, no one can be waiting for
-		 * start or filter update
-		 */
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
-		break;
-
-	case RXF_E_PAUSE:
-		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
-		break;
-
-	case RXF_E_RESUME:
-		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
-{
-	rxf->rxf_flags &=
-		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
-	__rxf_disable(rxf);
-}
-
-static void
-bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_FAIL:
-		/**
-		 * FSM was in the process of disabling rxf, initiated by
-		 * bnad.
-		 */
-		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_STOPPED:
-		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
-		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
-		break;
-
-	/*
-	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
-	 * any other event during these states
-	 */
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
-{
-	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
-	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
-	__rxf_enable(rxf);
-}
-
-static void
-bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_FAIL:
-		/**
-		 * FSM was in the process of disabling rxf, initiated by
-		 * bnad.
-		 */
-		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_STARTED:
-		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
-		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
-		break;
-
-	/*
-	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
-	 * any other event during these states
-	 */
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
-{
-	__bna_rxf_stat_clr(rxf);
-}
-
-static void
-bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_FAIL:
-	case RXF_E_STAT_CLEARED:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-__rxf_enable(struct bna_rxf *rxf)
-{
-	struct bfi_ll_rxf_multi_req ll_req;
-	u32 bm[2] = {0, 0};
-
-	if (rxf->rxf_id < 32)
-		bm[0] = 1 << rxf->rxf_id;
-	else
-		bm[1] = 1 << (rxf->rxf_id - 32);
-
-	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
-	ll_req.rxf_id_mask[0] = htonl(bm[0]);
-	ll_req.rxf_id_mask[1] = htonl(bm[1]);
-	ll_req.enable = 1;
-
-	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
-			rxf_cb_enabled, rxf);
-
-	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static void
-__rxf_disable(struct bna_rxf *rxf)
-{
-	struct bfi_ll_rxf_multi_req ll_req;
-	u32 bm[2] = {0, 0};
-
-	if (rxf->rxf_id < 32)
-		bm[0] = 1 << rxf->rxf_id;
-	else
-		bm[1] = 1 << (rxf->rxf_id - 32);
-
-	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
-	ll_req.rxf_id_mask[0] = htonl(bm[0]);
-	ll_req.rxf_id_mask[1] = htonl(bm[1]);
-	ll_req.enable = 0;
-
-	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
-			rxf_cb_disabled, rxf);
-
-	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static void
-__rxf_config_set(struct bna_rxf *rxf)
-{
-	u32 i;
-	struct bna_rss_mem *rss_mem;
-	struct bna_rx_fndb_ram *rx_fndb_ram;
-	struct bna *bna = rxf->rx->bna;
-	void __iomem *base_addr;
-	unsigned long off;
-
-	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
-			RSS_TABLE_BASE_OFFSET);
-
-	rss_mem = (struct bna_rss_mem *)0;
-
-	/* Configure RSS if required */
-	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
-		/* configure RSS Table */
-		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
-			bna->port_num, RSS_TABLE_BASE_OFFSET),
-			bna->regs.page_addr);
-
-		/* temporarily disable RSS, while hash value is written */
-		off = (unsigned long)&rss_mem[0].type_n_hash;
-		writel(0, base_addr + off);
-
-		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
-			off = (unsigned long)
-			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
-			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
-				base_addr + off);
-		}
-
-		off = (unsigned long)&rss_mem[0].type_n_hash;
-		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
-			base_addr + off);
-	}
-
-	/* Configure RxF */
-	writel(BNA_GET_PAGE_NUM(
-		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
-		RX_FNDB_RAM_BASE_OFFSET),
-		bna->regs.page_addr);
-
-	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
-		RX_FNDB_RAM_BASE_OFFSET);
-
-	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
-
-	/* We always use RSS table 0 */
-	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
-	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
-		base_addr + off);
-
-	/* small large buffer enable/disable */
-	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
-	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
-		base_addr + off);
-
-	/* RIT offset, HDS forced offset, multicast RxQ Id */
-	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
-	writel((rxf->rit_segment->rit_offset << 16) |
-		(rxf->forced_offset << 8) |
-		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
-		base_addr + off);
-
-	/*
-	 * default vlan tag, default function enable, strip vlan bytes,
-	 * HDS type, header size
-	 */
-
-	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
-	writel(((u32)rxf->default_vlan_tag << 16) |
-		(rxf->ctrl_flags &
-			(BNA_RXF_CF_DEFAULT_VLAN |
-			BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
-			BNA_RXF_CF_VLAN_STRIP)) |
-		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
-		rxf->hds_cfg.header_size,
-		base_addr + off);
-}
-
-void
-__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
-{
-	struct bna *bna = rxf->rx->bna;
-	int i;
-
-	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
-			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
-			bna->regs.page_addr);
-
-	if (status == BNA_STATUS_T_ENABLED) {
-		/* enable VLAN filtering on this function */
-		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
-			writel(rxf->vlan_filter_table[i],
-					BNA_GET_VLAN_MEM_ENTRY_ADDR
-					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
-						i * 32));
-		}
-	} else {
-		/* disable VLAN filtering on this function */
-		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
-			writel(0xffffffff,
-					BNA_GET_VLAN_MEM_ENTRY_ADDR
-					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
-						i * 32));
-		}
-	}
-}
-
-static void
-__rxf_rit_set(struct bna_rxf *rxf)
-{
-	struct bna *bna = rxf->rx->bna;
-	struct bna_rit_mem *rit_mem;
-	int i;
-	void __iomem *base_addr;
-	unsigned long off;
-
-	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
-			FUNCTION_TO_RXQ_TRANSLATE);
-
-	rit_mem = (struct bna_rit_mem *)0;
-
-	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
-		FUNCTION_TO_RXQ_TRANSLATE),
-		bna->regs.page_addr);
-
-	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
-		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
-		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
-			rxf->rit_segment->rit[i].small_rxq_id,
-			base_addr + off);
-	}
-}
-
-static void
-__bna_rxf_stat_clr(struct bna_rxf *rxf)
-{
-	struct bfi_ll_stats_req ll_req;
-	u32 bm[2] = {0, 0};
-
-	if (rxf->rxf_id < 32)
-		bm[0] = 1 << rxf->rxf_id;
-	else
-		bm[1] = 1 << (rxf->rxf_id - 32);
-
-	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
-	ll_req.stats_mask = 0;
-	ll_req.txf_id_mask[0] = 0;
-	ll_req.txf_id_mask[1] = 0;
-
-	ll_req.rxf_id_mask[0] = htonl(bm[0]);
-	ll_req.rxf_id_mask[1] = htonl(bm[1]);
-
-	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
-			bna_rxf_cb_stats_cleared, rxf);
-	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static void
-rxf_enable(struct bna_rxf *rxf)
-{
-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
-		bfa_fsm_send_event(rxf, RXF_E_STARTED);
-	else {
-		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
-		__rxf_enable(rxf);
-	}
-}
-
-static void
-rxf_cb_enabled(void *arg, int status)
-{
-	struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
-	bfa_q_qe_init(&rxf->mbox_qe.qe);
-	bfa_fsm_send_event(rxf, RXF_E_STARTED);
-}
-
-static void
-rxf_disable(struct bna_rxf *rxf)
-{
-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
-		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
-	else
-		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
-	__rxf_disable(rxf);
-}
-
-static void
-rxf_cb_disabled(void *arg, int status)
-{
-	struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
-	bfa_q_qe_init(&rxf->mbox_qe.qe);
-	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
-}
-
-void
-rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
-{
-	struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
-	bfa_q_qe_init(&rxf->mbox_qe.qe);
-
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
-}
-
-static void
-bna_rxf_cb_stats_cleared(void *arg, int status)
-{
-	struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
-	bfa_q_qe_init(&rxf->mbox_qe.qe);
-	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
-}
-
-void
-rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
-		const struct bna_mac *mac_addr)
-{
-	struct bfi_ll_mac_addr_req req;
-
-	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
-
-	req.rxf_id = rxf->rxf_id;
-	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
-
-	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
-				rxf_cb_cam_fltr_mbox_cmd, rxf);
-
-	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static int
-rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
-{
-	struct bna_mac *mac = NULL;
-	struct list_head *qe;
-
-	/* Add multicast entries */
-	if (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
-		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
-		list_add_tail(&mac->qe, &rxf->mcast_active_q);
-		return 1;
-	}
-
-	/* Delete multicast entries previousely added */
-	if (!list_empty(&rxf->mcast_pending_del_q)) {
-		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
-		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-		return 1;
-	}
-
-	return 0;
-}
-
-static int
-rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
-{
-	/* Apply the VLAN filter */
-	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
-		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
-		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
-			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
-	}
-
-	/* Apply RSS configuration */
-	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
-		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
-		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
-			/* RSS is being disabled */
-			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;