Diffstat (limited to 'drivers/scsi/bfa/bfa_ioc.c')
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 1888
1 files changed, 1349 insertions, 539 deletions
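The hunks below split the old monolithic bfa_ioc state machine into a slimmer IOC state machine plus a new IOCPF state machine that owns the per-function hardware bring-up (firmware check, h/w semaphore handling, hwinit and enable/disable handshakes). Both are built from the driver's table-driven FSM helpers, which are defined in the driver's shared headers rather than in this file; purely as orientation, the bfa_fsm_state_decl()/bfa_fsm_set_state() pattern used throughout expands roughly along these lines (a sketch assuming the common-header definitions, not text from this patch):

	/*
	 * Sketch of the FSM helpers assumed by the code below (the real
	 * macros live in the driver's shared headers, not in this diff).
	 * Each state contributes an entry action and an event handler;
	 * bfa_fsm_set_state() records the handler and runs the entry action,
	 * and bfa_fsm_send_event() dispatches an event to the current state.
	 */
	typedef void (*bfa_fsm_t)(void *fsm, int event);

	#define bfa_fsm_state_decl(oc, st, otype, etype)		\
		static void oc ## _sm_ ## st(otype *fsm, etype event);	\
		static void oc ## _sm_ ## st ## _entry(otype *fsm)

	#define bfa_fsm_set_state(_fsm, _state) do {			\
		(_fsm)->fsm = (bfa_fsm_t)(_state);			\
		_state ## _entry(_fsm);					\
	} while (0)

	#define bfa_fsm_send_event(_fsm, _event)			\
		((_fsm)->fsm((_fsm), (_event)))

With that pattern in mind, the IOC and the embedded IOCPF each run their own state machine (IOC_E_* versus IOCPF_E_* events) and interact only through the bfa_ioc_pf_*() and bfa_iocpf_*() helpers forward-declared in the hunks below.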
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8e78f20110a..6795b247791 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@@ -15,35 +15,33 @@
* General Public License for more details.
*/
-#include <bfa.h>
-#include <bfa_ioc.h>
-#include <bfa_fwimg_priv.h>
-#include <cna/bfa_cna_trcmod.h>
-#include <cs/bfa_debug.h>
-#include <bfi/bfi_ioc.h>
-#include <bfi/bfi_ctreg.h>
-#include <aen/bfa_aen_ioc.h>
-#include <aen/bfa_aen.h>
-#include <log/bfa_log_hal.h>
-#include <defs/bfa_defs_pci.h>
+#include "bfa_ioc.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+#include "bfa_defs_svc.h"
+#include "bfad_drv.h"
BFA_TRC_FILE(CNA, IOC);
/**
* IOC local definitions
*/
-#define BFA_IOC_TOV 2000 /* msecs */
-#define BFA_IOC_HWSEM_TOV 500 /* msecs */
-#define BFA_IOC_HB_TOV 500 /* msecs */
-#define BFA_IOC_HWINIT_MAX 2
-#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
-#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
+#define BFA_IOC_TOV 3000 /* msecs */
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_HWINIT_MAX 2
+#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
+#define bfa_hb_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
+ bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
+#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
+
#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN \
(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
@@ -55,100 +53,226 @@ BFA_TRC_FILE(CNA, IOC);
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
-#define bfa_ioc_firmware_lock(__ioc) \
+#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
-#define bfa_ioc_firmware_unlock(__ioc) \
+#define bfa_ioc_firmware_unlock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
-#define bfa_ioc_notify_hbfail(__ioc) \
+#define bfa_ioc_notify_hbfail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
-#define bfa_ioc_is_optrom(__ioc) \
- (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
-bfa_boolean_t bfa_auto_recover = BFA_TRUE;
+#ifdef BFA_IOC_IS_UEFI
+#define bfa_ioc_is_bios_optrom(__ioc) (0)
+#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
+#else
+#define bfa_ioc_is_bios_optrom(__ioc) \
+ (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+#define bfa_ioc_is_uefi(__ioc) (0)
+#endif
+
+#define bfa_ioc_mbox_cmd_pending(__ioc) \
+ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
* forward declarations
*/
-static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
-static void bfa_ioc_timeout(void *ioc);
-static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
-static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
-static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
-static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
-static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
-static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
-static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
+static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
+static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/**
- * bfa_ioc_sm
+ * hal_ioc_sm
*/
/**
- * IOC state machine events
+ * IOC state machine definitions/declarations
*/
enum ioc_event {
- IOC_E_ENABLE = 1, /* IOC enable request */
- IOC_E_DISABLE = 2, /* IOC disable request */
- IOC_E_TIMEOUT = 3, /* f/w response timeout */
- IOC_E_FWREADY = 4, /* f/w initialization done */
- IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */
- IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */
- IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */
- IOC_E_HBFAIL = 8, /* heartbeat failure */
- IOC_E_HWERROR = 9, /* hardware error interrupt */
- IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */
- IOC_E_DETACH = 11, /* driver detach cleanup */
+ IOC_E_RESET = 1, /* IOC reset request */
+ IOC_E_ENABLE = 2, /* IOC enable request */
+ IOC_E_DISABLE = 3, /* IOC disable request */
+ IOC_E_DETACH = 4, /* driver detach cleanup */
+ IOC_E_ENABLED = 5, /* f/w enabled */
+ IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
+ IOC_E_DISABLED = 7, /* f/w disabled */
+ IOC_E_FAILED = 8, /* failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /* heartbeat failure */
+ IOC_E_HWERROR = 10, /* hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /* timeout */
};
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
- {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
- {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
- {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
- {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
- {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
- {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+ {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
+ * IOCPF state machine definitions/declarations
+ */
+
+#define bfa_iocpf_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
+ bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define bfa_iocpf_recovery_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
+ bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+
+#define bfa_sem_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
+ bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
+#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
+
+/*
+ * Forward declarations for iocpf state machine
+ */
+static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_timeout(void *ioc_arg);
+static void bfa_iocpf_sem_timeout(void *ioc_arg);
+
+/**
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+ IOCPF_E_ENABLE = 1, /* IOCPF enable request */
+ IOCPF_E_DISABLE = 2, /* IOCPF disable request */
+ IOCPF_E_STOP = 3, /* stop on driver detach */
+ IOCPF_E_FWREADY = 4, /* f/w initialization done */
+ IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
+ IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
+ IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
+ IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
+ IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
+ IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
+ IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
+};
+
+/**
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+ BFA_IOCPF_RESET = 1, /* IOC is in reset state */
+ BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
+ BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
+ BFA_IOCPF_READY = 4, /* IOCPF is initialized */
+ BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
+ BFA_IOCPF_FAIL = 6, /* IOCPF failed */
+ BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
+ BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
+ BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
+
+static struct bfa_sm_table_s iocpf_sm_table[] = {
+ {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+ {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+ {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+ {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
+
+/**
+ * IOC State Machine
+ */
+
+/**
+ * Beginning state. IOC uninit state.
+ */
+
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/**
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_RESET:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+/**
* Reset entry actions -- initialize state machine
*/
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
- ioc->retry_count = 0;
- ioc->auto_recover = bfa_auto_recover;
+ bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/**
- * Beginning state. IOC is in reset state.
+ * IOC is in reset state.
*/
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
@@ -157,7 +281,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
switch (event) {
case IOC_E_ENABLE:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
@@ -165,6 +289,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
break;
case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
default:
@@ -172,46 +297,209 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_iocpf_enable(ioc);
+}
+
/**
- * Semaphore should be acquired for version check.
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
*/
static void
-bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
+bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
- bfa_ioc_hw_sem_get(ioc);
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_FAILED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ bfa_iocpf_initfail(ioc);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_getattr(ioc);
}
/**
- * Awaiting h/w semaphore to continue with version check.
+ * IOC configuration in progress. Timer is active.
*/
static void
-bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_SEMLOCKED:
- if (bfa_ioc_firmware_lock(ioc)) {
- ioc->retry_count = 0;
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
- } else {
- bfa_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
- }
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_FAILED:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ bfa_iocpf_getattrfail(ioc);
break;
case IOC_E_DISABLE:
- bfa_ioc_disable_comp(ioc);
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_hb_monitor(ioc);
+ BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_FAILED:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_hb_stop(ioc);
+ /* !!! fall through !!! */
+
+ case IOC_E_HBFAIL:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ bfa_iocpf_fail(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ bfa_iocpf_disable(ioc);
+ BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_DISABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
/*
- * fall through
+ * No state change. Will move to disabled state
+ * after iocpf sm completes failure processing and
+ * moves to disabled state.
*/
+ bfa_iocpf_fail(ioc);
+ break;
- case IOC_E_DETACH:
- bfa_ioc_hw_sem_get_cancel(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
break;
- case IOC_E_FWREADY:
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
break;
default:
@@ -219,48 +507,138 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
+
+static void
+bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+}
+
/**
- * Notify enable completion callback and generate mismatch AEN.
+ * Hardware initialization failed.
*/
static void
-bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
+bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_FAILED:
+ /**
+ * Initialization failure during iocpf init retry.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify_s *notify;
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
/**
- * Provide enable completion callback and AEN notification only once.
+ * Notify driver and common modules registered for notification.
*/
- if (ioc->retry_count == 0) {
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify_s *) qe;
+ notify->cbfn(notify->cbarg);
}
- ioc->retry_count++;
- bfa_ioc_timer_start(ioc);
+
+ BFA_LOG(KERN_CRIT, bfad, log_level,
+ "Heart Beat of IOC has failed\n");
}
/**
- * Awaiting firmware version match.
+ * IOC failure.
*/
static void
-bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_TIMEOUT:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+
+ case IOC_E_FAILED:
+ /**
+ * Initialization failure during iocpf recovery.
+ * !!! Fall through !!!
+ */
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
case IOC_E_DISABLE:
- bfa_ioc_disable_comp(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_HWERROR:
/*
- * fall through
+ * HB failure notification, ignore.
*/
+ break;
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
- case IOC_E_DETACH:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+
+
+/**
+ * IOCPF State Machine
+ */
+
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
+{
+ iocpf->retry_count = 0;
+ iocpf->auto_recover = bfa_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
break;
- case IOC_E_FWREADY:
+ case IOCPF_E_STOP:
break;
default:
@@ -269,31 +647,44 @@ bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
}
/**
- * Request for semaphore.
+ * Semaphore should be acquired for version check.
*/
static void
-bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_ioc_hw_sem_get(ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
- * Awaiting semaphore for h/w initialzation.
+ * Awaiting h/w semaphore to continue with version check.
*/
static void
-bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_SEMLOCKED:
- ioc->retry_count = 0;
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ case IOCPF_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ iocpf->retry_count = 0;
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+ }
break;
- case IOC_E_DISABLE:
+ case IOCPF_E_DISABLE:
bfa_ioc_hw_sem_get_cancel(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_ioc_pf_disabled(ioc);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
@@ -301,51 +692,81 @@ bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
-
+/**
+ * Notify enable completion callback.
+ */
static void
-bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_reset(ioc, BFA_FALSE);
+ /*
+ * Call only the first time sm enters fwmismatch state.
+ */
+ if (iocpf->retry_count == 0)
+ bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+ iocpf->retry_count++;
+ bfa_iocpf_timer_start(iocpf->ioc);
}
/**
- * Hardware is being initialized. Interrupts are enabled.
- * Holding hardware semaphore lock.
+ * Awaiting firmware version match.
*/
static void
-bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_FWREADY:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ case IOCPF_E_TIMEOUT:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
break;
- case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
- /*
- * fall through
- */
+ case IOCPF_E_DISABLE:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_ioc_pf_disabled(ioc);
+ break;
- case IOC_E_TIMEOUT:
- ioc->retry_count++;
- if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
- bfa_ioc_timer_start(ioc);
- bfa_ioc_reset(ioc, BFA_TRUE);
- break;
- }
+ case IOCPF_E_STOP:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
- bfa_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ iocpf->retry_count = 0;
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
break;
- case IOC_E_DISABLE:
- bfa_ioc_hw_sem_release(ioc);
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
@@ -355,55 +776,54 @@ bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
static void
-bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_send_enable(ioc);
+ bfa_iocpf_timer_start(iocpf->ioc);
+ bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
}
/**
- * Host IOC function is being enabled, awaiting response from firmware.
- * Semaphore is acquired.
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
*/
static void
-bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_FWRSP_ENABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ case IOCPF_E_FWREADY:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
break;
- case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
+ case IOCPF_E_INITFAIL:
+ bfa_iocpf_timer_stop(ioc);
/*
- * fall through
+ * !!! fall through !!!
*/
- case IOC_E_TIMEOUT:
- ioc->retry_count++;
- if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
- bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
- BFI_IOC_UNINIT);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ case IOCPF_E_TIMEOUT:
+ iocpf->retry_count++;
+ if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_iocpf_timer_start(ioc);
+ bfa_ioc_reset(ioc, BFA_TRUE);
break;
}
bfa_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
- break;
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
- case IOC_E_DISABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ if (event == IOCPF_E_TIMEOUT)
+ bfa_ioc_pf_failed(ioc);
break;
- case IOC_E_FWREADY:
- bfa_ioc_send_enable(ioc);
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
@@ -413,40 +833,60 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
static void
-bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_send_getattr(ioc);
+ bfa_iocpf_timer_start(iocpf->ioc);
+ bfa_ioc_send_enable(iocpf->ioc);
}
/**
- * IOC configuration in progress. Timer is active.
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
*/
static void
-bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_FWRSP_GETATTR:
- bfa_ioc_timer_stop(ioc);
- bfa_ioc_check_attr_wwns(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ case IOCPF_E_FWRSP_ENABLE:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
- case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
+ case IOCPF_E_INITFAIL:
+ bfa_iocpf_timer_stop(ioc);
/*
- * fall through
+ * !!! fall through !!!
*/
- case IOC_E_TIMEOUT:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ case IOCPF_E_TIMEOUT:
+ iocpf->retry_count++;
+ if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
+ BFI_IOC_UNINIT);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ break;
+ }
+
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+
+ if (event == IOCPF_E_TIMEOUT)
+ bfa_ioc_pf_failed(ioc);
break;
- case IOC_E_DISABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_DISABLE:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ break;
+
+ case IOCPF_E_FWREADY:
+ bfa_ioc_send_enable(ioc);
break;
default:
@@ -455,41 +895,40 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
}
+
static void
-bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
- bfa_ioc_hb_monitor(ioc);
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
+ bfa_ioc_pf_enabled(iocpf->ioc);
}
static void
-bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_ENABLE:
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
- case IOC_E_DISABLE:
- bfa_ioc_hb_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ case IOCPF_E_GETATTRFAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
- case IOC_E_HWERROR:
- case IOC_E_FWREADY:
- /**
- * Hard error or IOC recovery by other function.
- * Treat it same as heartbeat failure.
- */
- bfa_ioc_hb_stop(ioc);
- /*
- * !!! fall through !!!
- */
+ case IOCPF_E_FAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ break;
- case IOC_E_HBFAIL:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+ case IOCPF_E_FWREADY:
+ if (bfa_ioc_is_operational(ioc))
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ else
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+
+ bfa_ioc_pf_failed(ioc);
break;
default:
@@ -499,36 +938,41 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
static void
-bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
- bfa_ioc_timer_start(ioc);
- bfa_ioc_send_disable(ioc);
+ bfa_iocpf_timer_start(iocpf->ioc);
+ bfa_ioc_send_disable(iocpf->ioc);
}
/**
* IOC is being disabled
*/
static void
-bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_FWRSP_DISABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_FWRSP_DISABLE:
+ case IOCPF_E_FWREADY:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
- case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
+ case IOCPF_E_FAIL:
+ bfa_iocpf_timer_stop(ioc);
/*
* !!! fall through !!!
*/
- case IOC_E_TIMEOUT:
+ case IOCPF_E_TIMEOUT:
bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ case IOCPF_E_FWRSP_ENABLE:
break;
default:
@@ -540,31 +984,26 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
* IOC disable completion entry.
*/
static void
-bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_ioc_disable_comp(ioc);
+ bfa_ioc_pf_disabled(iocpf->ioc);
}
static void
-bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+