Diffstat (limited to 'drivers/edac/amd64_edac.h')
| -rw-r--r-- | drivers/edac/amd64_edac.h | 509 |
1 files changed, 254 insertions, 255 deletions
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f6..d903e0c2114 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -33,7 +33,7 @@
  *	detection. The mods to Rev F required more family
  *	information detection.
  *
- * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ * Changes/Fixes by Borislav Petkov <bp@alien8.de>:
  *	- misc fixes and code cleanups
  *
  * This module is based on the following documents
@@ -144,7 +144,7 @@
  *	sections 3.5.4 and 3.5.5 for more information.
  */

-#define EDAC_AMD64_VERSION		"v3.3.0"
+#define EDAC_AMD64_VERSION		"3.4.0"
 #define EDAC_MOD_STR			"amd64_edac"

 /* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,8 +153,8 @@
 #define K8_REV_F			4

 /* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT			8
-#define DRAM_REG_COUNT			8
+#define NUM_CHIPSELECTS			8
+#define DRAM_RANGES			8

 #define ON true
 #define OFF false
@@ -162,234 +162,177 @@
 /*
  * PCI-defined configuration space registers
  */
-
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
+#define PCI_DEVICE_ID_AMD_15H_NB_F1	0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
+#define PCI_DEVICE_ID_AMD_16H_NB_F1	0x1531
+#define PCI_DEVICE_ID_AMD_16H_NB_F2	0x1532
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582

 /*
  * Function 1 - Address Map
  */
-#define K8_DRAM_BASE_LOW		0x40
-#define K8_DRAM_LIMIT_LOW		0x44
-#define K8_DHAR				0xf0
-
-#define DHAR_VALID			BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID	BIT(1)
+#define DRAM_BASE_LO			0x40
+#define DRAM_LIMIT_LO			0x44

-#define DHAR_BASE_MASK			0xff000000
-#define dhar_base(dhar)			(dhar & DHAR_BASE_MASK)
-
-#define K8_DHAR_OFFSET_MASK		0x0000ff00
-#define k8_dhar_offset(dhar)		((dhar & K8_DHAR_OFFSET_MASK) << 16)
+/*
+ * F15 M30h D18F1x2[1C:00]
+ */
+#define DRAM_CONT_BASE			0x200
+#define DRAM_CONT_LIMIT			0x204

-#define F10_DHAR_OFFSET_MASK		0x0000ff80
-				   /* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar)		((dhar & F10_DHAR_OFFSET_MASK) << 16)
+/*
+ * F15 M30h D18F1x2[4C:40]
+ */
+#define DRAM_CONT_HIGH_OFF		0x240

+#define dram_rw(pvt, i)			((u8)(pvt->ranges[i].base.lo & 0x3))
+#define dram_intlv_sel(pvt, i)		((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
+#define dram_dst_node(pvt, i)		((u8)(pvt->ranges[i].lim.lo & 0x7))

-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH		0x140
-#define F10_DRAM_LIMIT_HIGH		0x144
+#define DHAR				0xf0
+#define dhar_mem_hoist_valid(pvt)	((pvt)->dhar & BIT(1))
+#define dhar_base(pvt)			((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt)		(((pvt)->dhar & 0x0000ff00) << 16)

+					/* NOTE: Extra mask bit vs K8 */
+#define f10_dhar_offset(pvt)		(((pvt)->dhar & 0x0000ff80) << 16)

-/*
- * Function 2 - DRAM controller
- */
-#define K8_DCSB0			0x40
-#define F10_DCSB1			0x140
+#define DCT_CFG_SEL			0x10C

-#define K8_DCSB_CS_ENABLE		BIT(0)
-#define K8_DCSB_NPT_SPARE		BIT(1)
-#define K8_DCSB_NPT_TESTFAIL		BIT(2)
+#define DRAM_LOCAL_NODE_BASE		0x120
+#define DRAM_LOCAL_NODE_LIM		0x124

-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS		(0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT			4
+#define DRAM_BASE_HI			0x140
+#define DRAM_LIMIT_HI			0x144

-#define REV_F_F1Xh_DCSB_BASE_BITS	(0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT		8

 /*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
+ * Function 2 - DRAM controller
  */
-#define REV_F_DCSB_BASE_BITS		(0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT			8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0			0x60
-#define F10_DCSM1			0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS		0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS		0x01F01FFF
+#define DCSB0				0x40
+#define DCSB1				0x140
+#define DCSB_CS_ENABLE			BIT(0)

-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS	0x1FF83FE0
+#define DCSM0				0x60
+#define DCSM1				0x160

-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS	0x07C01FFF
+#define csrow_enabled(i, dct, pvt)	((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)

 #define DBAM0				0x80
 #define DBAM1				0x180

 /* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */
-#define DBAM_DIMM(i, reg)		((((reg) >> (4*i))) & 0xF)
+#define DBAM_DIMM(i, reg)		((((reg) >> (4*(i)))) & 0xF)

 #define DBAM_MAX_VALUE			11

-
-#define F10_DCLR_0			0x90
-#define F10_DCLR_1			0x190
+#define DCLR0				0x90
+#define DCLR1				0x190
 #define REVE_WIDTH_128			BIT(16)
-#define F10_WIDTH_128			BIT(11)
+#define WIDTH_128			BIT(11)

+#define DCHR0				0x94
+#define DCHR1				0x194
+#define DDR3_MODE			BIT(8)

-#define F10_DCHR_0			0x94
-#define F10_DCHR_1			0x194
+#define DCT_SEL_LO			0x110
+#define dct_high_range_enabled(pvt)	((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt)	((pvt)->dct_sel_lo & BIT(2))

-#define F10_DCHR_FOUR_RANK_DIMM		BIT(18)
-#define DDR3_MODE			BIT(8)
-#define F10_DCHR_MblMode		BIT(6)
+#define dct_ganging_enabled(pvt)	((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
+#define dct_data_intlv_enabled(pvt)	((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt)		((pvt)->dct_sel_lo & BIT(10))

-#define F10_DCTL_SEL_LOW		0x110
-#define dct_sel_baseaddr(pvt)		((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt)	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt)		(pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt)		(pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG			0x10c

-#define F10_DCTL_SEL_HIGH		0x114
+#define DCT_SEL_HI			0x114

 /*
  * Function 3 - Misc Control
  */
-#define K8_NBCTL			0x40
+#define NBCTL				0x40

-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn			BIT(0)
+#define NBCFG				0x44
+#define NBCFG_CHIPKILL			BIT(23)
+#define NBCFG_ECC_ENABLE		BIT(22)

-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn			BIT(1)
-
-#define K8_NBCFG			0x44
-#define K8_NBCFG_CHIPKILL		BIT(23)
-#define K8_NBCFG_ECC_ENABLE		BIT(22)
-
-#define K8_NBSL				0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES		0x0
+/* F3x48: NBSL */
 #define F10_NBSL_EXT_ERR_ECC		0x8
+#define NBSL_PP_OBS			0x2

-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO	0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO	0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY	0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY	0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY	0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK	0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK	0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA	0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG		0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU		0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC		0x0
-#define K8_NBSL_EXT_ERR_CRC		0x1
-#define K8_NBSL_EXT_ERR_SYNC		0x2
-#define K8_NBSL_EXT_ERR_MST		0x3
-#define K8_NBSL_EXT_ERR_TGT		0x4
-#define K8_NBSL_EXT_ERR_GART		0x5
-#define K8_NBSL_EXT_ERR_RMW		0x6
-#define K8_NBSL_EXT_ERR_WDT		0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC	0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY	0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC			0x0
-#define K8_NBSL_PP_RES			0x1
-#define K8_NBSL_PP_OBS			0x2
-#define K8_NBSL_PP_GENERIC		0x3
-
-#define EXTRACT_ERR_CPU_MAP(x)		((x) & 0xF)
-
-#define K8_NBEAL			0x50
-#define K8_NBEAH			0x54
-#define K8_SCRCTRL			0x58
-
-#define F10_NB_CFG_LOW			0x88
+#define SCRCTRL				0x58

 #define F10_ONLINE_SPARE		0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x)	((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x)	(((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x)	(((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c)	(((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c)	(((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)

 #define F10_NB_ARRAY_ADDR		0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC		0x80000000
+#define F10_NB_ARRAY_DRAM		BIT(31)

 /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
-#define SET_NB_ARRAY_ADDRESS(section)	(((section) & 0x3) << 1)
+#define SET_NB_ARRAY_ADDR(section)	(((section) & 0x3) << 1)

 #define F10_NB_ARRAY_DATA		0xBC
+#define F10_NB_ARR_ECC_WR_REQ		BIT(17)
+#define SET_NB_DRAM_INJECTION_WRITE(inj)  \
+					(BIT(((inj.word) & 0xF) + 20) | \
+					F10_NB_ARR_ECC_WR_REQ | inj.bit_map)
+#define SET_NB_DRAM_INJECTION_READ(inj)  \
+					(BIT(((inj.word) & 0xF) + 20) | \
+					BIT(16) |  inj.bit_map)

-#define SET_NB_DRAM_INJECTION_WRITE(word, bits)  \
-					(BIT(((word) & 0xF) + 20) | \
-					BIT(17) | bits)
-
-#define SET_NB_DRAM_INJECTION_READ(word, bits)  \
-					(BIT(((word) & 0xF) + 20) | \
-					BIT(16) | bits)
-
-#define K8_NBCAP			0xE8
-#define K8_NBCAP_CORES			(BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL		BIT(4)
-#define K8_NBCAP_SECDED			BIT(3)
-#define K8_NBCAP_DCT_DUAL		BIT(0)
+#define NBCAP				0xE8
+#define NBCAP_CHIPKILL			BIT(4)
+#define NBCAP_SECDED			BIT(3)
+#define NBCAP_DCT_DUAL			BIT(0)

 #define EXT_NB_MCA_CFG			0x180

 /* MSRs */
-#define K8_MSR_MCGCTL_NBE		BIT(4)
-
-#define K8_MSR_MC4CTL			0x0410
-#define K8_MSR_MC4STAT			0x0411
-#define K8_MSR_MC4ADDR			0x0412
+#define MSR_MCGCTL_NBE			BIT(4)

-/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_node_id(struct pci_dev *pdev)
-{
-	return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
-enum amd64_chipset_families {
+enum amd_families {
 	K8_CPUS = 0,
 	F10_CPUS,
+	F15_CPUS,
+	F15_M30H_CPUS,
+	F16_CPUS,
+	F16_M30H_CPUS,
+	NUM_FAMILIES,
 };

 /* Error injection control structure */
 struct error_injection {
-	u32	 section;
-	u32	 word;
-	u32	 bit_map;
+	u32	section;
+	u32	word;
+	u32	bit_map;
+};
+
+/* low and high part of PCI config space regs */
+struct reg_pair {
+	u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+	struct reg_pair base;
+	struct reg_pair lim;
+};
+
+/* A DCT chip selects collection */
+struct chip_select {
+	u32 csbases[NUM_CHIPSELECTS];
+	u8 b_cnt;
+
+	u32 csmasks[NUM_CHIPSELECTS];
+	u8 m_cnt;
 };

 struct amd64_pvt {
@@ -398,7 +341,11 @@ struct amd64_pvt {
 	/* pci_device handles which we utilize */
 	struct pci_dev *F1, *F2, *F3;

-	int mc_node_id;		/* MC index of this MC node */
+	u16 mc_node_id;		/* MC index of this MC node */
+	u8 fam;			/* CPU family */
+	u8 model;		/* ... model */
+	u8 stepping;		/* ... stepping */
+
 	int ext_model;		/* extended model value of this node */
 	int channel_count;

@@ -414,61 +361,76 @@ struct amd64_pvt {
 	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
 	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */

-	/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
-	u32 dcsb0[MAX_CS_COUNT];
-	u32 dcsb1[MAX_CS_COUNT];
-
-	/* DRAM CS Mask Registers F2x[1,0][6C:60] */
-	u32 dcsm0[MAX_CS_COUNT];
-	u32 dcsm1[MAX_CS_COUNT];
-
-	/*
-	 * Decoded parts of DRAM BASE and LIMIT Registers
-	 * F1x[78,70,68,60,58,50,48,40]
-	 */
-	u64 dram_base[DRAM_REG_COUNT];
-	u64 dram_limit[DRAM_REG_COUNT];
-	u8  dram_IntlvSel[DRAM_REG_COUNT];
-	u8  dram_IntlvEn[DRAM_REG_COUNT];
-	u8  dram_DstNode[DRAM_REG_COUNT];
-	u8  dram_rw_en[DRAM_REG_COUNT];
-
-	/*
-	 * The following fields are set at (load) run time, after CPU revision
-	 * has been determined, since the dct_base and dct_mask registers vary
-	 * based on revision
-	 */
-	u32 dcsb_base;		/* DCSB base bits */
-	u32 dcsm_mask;		/* DCSM mask bits */
-	u32 cs_count;		/* num chip selects (== num DCSB registers) */
-	u32 num_dcsm;		/* Number of DCSM registers */
-	u32 dcs_mask_notused;	/* DCSM notused mask bits */
-	u32 dcs_shift;		/* DCSB and DCSM shift value */
+	/* one for each DCT */
+	struct chip_select csels[2];
+
+	/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+	struct dram_range ranges[DRAM_RANGES];

 	u64 top_mem;		/* top of memory below 4GB */
 	u64 top_mem2;		/* top of memory above 4GB */

-	u32 dram_ctl_select_low;	/* DRAM Controller Select Low Reg */
-	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */
-	u32 online_spare;		/* On-Line spare Reg */
+	u32 dct_sel_lo;		/* DRAM Controller Select Low */
+	u32 dct_sel_hi;		/* DRAM Controller Select High */
+	u32 online_spare;	/* On-Line spare Reg */

 	/* x4 or x8 syndromes in use */
-	u8 syn_type;
-
-	/* temp storage for when input is received from sysfs */
-	struct err_regs ctl_error_info;
+	u8 ecc_sym_sz;

 	/* place to store error injection parameters prior to issue */
 	struct error_injection injection;
+};

-	/* DCT per-family scrubrate setting */
-	u32 min_scrubrate;
-
-	/* family name this instance is running on */
-	const char *ctl_name;
+enum err_codes {
+	DECODE_OK	=  0,
+	ERR_NODE	= -1,
+	ERR_CSROW	= -2,
+	ERR_CHANNEL	= -3,
+};

+struct err_info {
+	int err_code;
+	struct mem_ctl_info *src_mci;
+	int csrow;
+	int channel;
+	u16 syndrome;
+	u32 page;
+	u32 offset;
 };

+static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
+{
+	u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
+
+	if (boot_cpu_data.x86 == 0xf)
+		return addr;
+
+	return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
+{
+	u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+	if (boot_cpu_data.x86 == 0xf)
+		return lim;
+
+	return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
+
+static inline u16 extract_syndrome(u64 status)
+{
+	return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
+
+static inline u8 dct_sel_interleave_addr(struct amd64_pvt *pvt)
+{
+	if (pvt->fam == 0x15 && pvt->model >= 0x30)
+		return (((pvt->dct_sel_hi >> 9) & 0x1) << 2) |
+			((pvt->dct_sel_lo >> 6) & 0x3);
+
+	return ((pvt)->dct_sel_lo >> 6) & 0x3;
+}
 /*
  * per-node ECC settings descriptor
  */
@@ -482,43 +444,45 @@ struct ecc_settings {
 	} flags;
 };

-extern const char *tt_msgs[4];
-extern const char *ll_msgs[4];
-extern const char *rrrr_msgs[16];
-extern const char *to_msgs[2];
-extern const char *pp_msgs[4];
-extern const char *ii_msgs[4];
-extern const char *htlink_msgs[8];
-
 #ifdef CONFIG_EDAC_DEBUG
-#define NUM_DBG_ATTRS 5
+int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci);
+void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci);
+
 #else
-#define NUM_DBG_ATTRS 0
+static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
+{
+	return 0;
+}
+static void inline amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
+{
+}
 #endif

 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
-#define NUM_INJ_ATTRS 5
+int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci);
+void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci);
+
 #else
-#define NUM_INJ_ATTRS 0
+static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
+{
+	return 0;
+}
+static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
+{
+}
 #endif

-extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
-				     amd64_inj_attrs[NUM_INJ_ATTRS];
-
 /*
  * Each of the PCI Device IDs types have their own set of hardware accessor
  * functions and per device encoding/decoding logic.
  */
 struct low_ops {
 	int (*early_channel_count)	(struct amd64_pvt *pvt);
-
-	u64 (*get_error_address)	(struct mem_ctl_info *mci,
-					 struct err_regs *info);
-	void (*read_dram_base_limit)	(struct amd64_pvt *pvt, int dram);
-	void (*read_dram_ctl_register)	(struct amd64_pvt *pvt);
-	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci,
-					 struct err_regs *info, u64 SystemAddr);
-	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
+	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci, u64 sys_addr,
+					 struct err_info *);
+	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+	int (*read_dct_pci_cfg)		(struct amd64_pvt *pvt, int offset,
+					 u32 *val, const char *func);
 };

 struct amd64_family_type {
@@ -527,28 +491,63 @@ struct amd64_family_type {
 	struct low_ops ops;
 };

-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
-					   u32 *val, const char *func)
-{
-	int err = 0;
-
-	err = pci_read_config_dword(pdev, offset, val);
-	if (err)
-		amd64_warn("%s: error reading F%dx%x.\n",
-			   func, PCI_FUNC(pdev->devfn), offset);
-
-	return err;
-}
+int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+			       u32 *val, const char *func);
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+				u32 val, const char *func);

 #define amd64_read_pci_cfg(pdev, offset, val)	\
-	amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+	__amd64_read_pci_cfg_dword(pdev, offset, val, __func__)

-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS	0x0
-#define F10_MIN_SCRUB_RATE_BITS	0x5
+#define amd64_write_pci_cfg(pdev, offset, val)	\
+	__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+	pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)

 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 			     u64 *hole_offset, u64 *hole_size);
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+/* Injection helpers */
+static inline void disable_caches(void *dummy)
+{
+	write_cr0(read_cr0() | X86_CR0_CD);
+	wbinvd();
+}
+
+static inline void enable_caches(void *dummy)
+{
+	write_cr0(read_cr0() & ~X86_CR0_CD);
+}
+
+static inline u8 dram_intlv_en(struct amd64_pvt *pvt, unsigned int i)
+{
+	if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+		u32 tmp;
+		amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &tmp);
+		return (u8) tmp & 0xF;
+	}
+	return (u8) (pvt->ranges[i].base.lo >> 8) & 0x7;
+}
+
+static inline u8 dhar_valid(struct amd64_pvt *pvt)
+{
+	if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+		u32 tmp;
+		amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
+		return (tmp >> 1) & BIT(0);
+	}
+	return (pvt)->dhar & BIT(0);
+}
+
+static inline u32 dct_sel_baseaddr(struct amd64_pvt *pvt)
+{
+	if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+		u32 tmp;
+		amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
+		return (tmp >> 11) & 0x1FFF;
+	}
+	return (pvt)->dct_sel_lo & 0xFFFFF800;
+}
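
For orientation, here is a brief illustrative sketch of how the reworked structures introduced above are typically consumed. The helper names count_enabled_csrows() and find_dram_range() are hypothetical and not part of this change; they only exercise csrow_enabled(), dram_rw(), get_dram_base() and get_dram_limit() as defined in the new header, under the assumption that pvt has already been populated by the driver.

/*
 * Illustrative only: hypothetical helpers, not part of the patch, showing
 * how the new per-DCT chip-select layout and DRAM range pairs are used.
 */

/* Count the chip-select rows enabled on DCT 'dct' via csrow_enabled(). */
static int count_enabled_csrows(struct amd64_pvt *pvt, u8 dct)
{
	int i, cnt = 0;

	/* b_cnt holds the number of DCSB registers read for this DCT */
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
		if (csrow_enabled(i, dct, pvt))
			cnt++;

	return cnt;
}

/* Return the index of the DRAM range containing sys_addr, or -1 if none. */
static int find_dram_range(struct amd64_pvt *pvt, u64 sys_addr)
{
	u8 i;

	for (i = 0; i < DRAM_RANGES; i++) {
		/* a range with R/W bits clear is not enabled */
		if (!dram_rw(pvt, i))
			continue;

		if (sys_addr >= get_dram_base(pvt, i) &&
		    sys_addr <= get_dram_limit(pvt, i))
			return i;
	}

	return -1;
}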
