Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r--   drivers/edac/amd64_edac.c   1423
1 file changed, 785 insertions(+), 638 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c9eee6d33e9..f8bf00010d4 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,7 @@
#include "amd64_edac.h"
#include <asm/amd_nb.h>
-static struct edac_pci_ctl_info *amd64_ctl_pci;
+static struct edac_pci_ctl_info *pci_ctl;
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);
@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
*
*FIXME: Produce a better mapping/linearisation.
*/
-struct scrubrate {
+static const struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
@@ -60,8 +60,8 @@ struct scrubrate {
{ 0x00, 0UL}, /* scrubbing off */
};
-static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
+int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
{
int err = 0;
@@ -98,6 +98,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
*
* F15h: we select which DCT we access using F1x10C[DctCfgSel]
*
+ * F16h: has only 1 DCT
*/
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
const char *func)
@@ -122,7 +123,7 @@ static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
u32 reg = 0;
amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
- reg &= 0xfffffffe;
+ reg &= (pvt->model >= 0x30) ? ~3 : ~1;
reg |= dct;
amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
@@ -132,8 +133,9 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
{
u8 dct = 0;
+ /* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
if (addr >= 0x140 && addr <= 0x1a0) {
- dct = 1;
+ dct = (pvt->model >= 0x30) ? 3 : 1;
addr -= 0x100;
}
@@ -160,7 +162,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
* scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
*/
-static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
u32 scrubval;
int i;
@@ -170,8 +172,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
* memory controller and apply to register. Search for the first
* bandwidth entry that is greater or equal than the setting requested
* and program that. If at last entry, turn off DRAM scrubbing.
+ *
+ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
+ * by falling back to the last element in scrubrates[].
*/
- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
/*
* skip scrub rates which aren't recommended
* (see F10 BKDG, F3x58)
@@ -181,12 +186,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
if (scrubrates[i].bandwidth <= new_bw)
break;
-
- /*
- * if no suitable bandwidth found, turn off DRAM scrubbing
- * entirely by falling back to the last element in the
- * scrubrates array.
- */
}
scrubval = scrubrates[i].scrubval;
@@ -199,29 +198,29 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
return 0;
}
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
+static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 min_scrubrate = 0x5;
- if (boot_cpu_data.x86 == 0xf)
+ if (pvt->fam == 0xf)
min_scrubrate = 0x0;
- /* F15h Erratum #505 */
- if (boot_cpu_data.x86 == 0x15)
+ /* Erratum #505 */
+ if (pvt->fam == 0x15 && pvt->model < 0x10)
f15h_select_dct(pvt, 0);
- return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
+ return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
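
A note on how scrubrates[] is consumed: __set_scrub_rate() walks the table from the highest bandwidth down and programs the first entry whose bandwidth is less than or equal to the requested value; with the loop bound now at ARRAY_SIZE() - 1, a request that matches nothing falls through to the final "scrubbing off" entry. A minimal userspace sketch of that search, using a trimmed copy of the table (entries abridged for brevity, min_rate filtering omitted):

#include <stdio.h>
#include <stdint.h>

/* trimmed copy of scrubrates[]; same ordering: highest bandwidth first */
static const struct { uint32_t scrubval, bandwidth; } scrubrates[] = {
	{ 0x01, 1600000000UL },
	{ 0x02,  800000000UL },
	{ 0x03,  400000000UL },
	{ 0x04,  200000000UL },
	{ 0x05,  100000000UL },
	{ 0x00,           0UL },	/* scrubbing off */
};
#define N (sizeof(scrubrates) / sizeof(scrubrates[0]))

/* mirror of the search loop in __set_scrub_rate(): pick the first entry
 * whose bandwidth is <= new_bw; if none matches, i ends up at the last
 * ("off") entry. */
static uint32_t pick_scrubval(uint32_t new_bw)
{
	unsigned int i;

	for (i = 0; i < N - 1; i++)
		if (scrubrates[i].bandwidth <= new_bw)
			break;

	return scrubrates[i].scrubval;
}

int main(void)
{
	printf("0x%02x\n", pick_scrubval(500000000));	/* -> 0x03 (400 MB/s) */
	printf("0x%02x\n", pick_scrubval(1));		/* -> 0x00 (off) */
	return 0;
}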
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
+static int get_scrub_rate(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 scrubval = 0;
int i, retval = -EINVAL;
- /* F15h Erratum #505 */
- if (boot_cpu_data.x86 == 0x15)
+ /* Erratum #505 */
+ if (pvt->fam == 0x15 && pvt->model < 0x10)
f15h_select_dct(pvt, 0);
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
@@ -241,8 +240,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
* returns true if the SysAddr given by sys_addr matches the
* DRAM base/limit associated with node_id
*/
-static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- unsigned nid)
+static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
u64 addr;
@@ -268,7 +266,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- unsigned node_id;
+ u8 node_id;
u32 intlv_en, bits;
/*
@@ -286,7 +284,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
if (intlv_en == 0) {
for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
- if (amd64_base_limit_match(pvt, sys_addr, node_id))
+ if (base_limit_match(pvt, sys_addr, node_id))
goto found;
}
goto err_no_match;
@@ -310,7 +308,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
/* sanity test for sys_addr */
- if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+ if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
"range for node %d with node interleaving enabled.\n",
__func__, sys_addr, node_id);
@@ -321,8 +319,8 @@ found:
return edac_mc_find((int)node_id);
err_no_match:
- debugf2("sys_addr 0x%lx doesn't match any node\n",
- (unsigned long)sys_addr);
+ edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
+ (unsigned long)sys_addr);
return NULL;
}
@@ -337,21 +335,45 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
u64 csbase, csmask, base_bits, mask_bits;
u8 addr_shift;
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow];
- base_bits = GENMASK(21, 31) | GENMASK(9, 15);
- mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
+ mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
addr_shift = 4;
+
+ /*
+ * F16h and F15h, models 30h and later need two addr_shift values:
+ * 8 for high and 6 for low (cf. F16h BKDG).
+ */
+ } else if (pvt->fam == 0x16 ||
+ (pvt->fam == 0x15 && pvt->model >= 0x30)) {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow >> 1];
+
+ *base = (csbase & GENMASK_ULL(15, 5)) << 6;
+ *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
+
+ *mask = ~0ULL;
+ /* poke holes for the csmask */
+ *mask &= ~((GENMASK_ULL(15, 5) << 6) |
+ (GENMASK_ULL(30, 19) << 8));
+
+ *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
+ *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
+
+ return;
} else {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow >> 1];
addr_shift = 8;
- if (boot_cpu_data.x86 == 0x15)
- base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ if (pvt->fam == 0x15)
+ base_bits = mask_bits =
+ GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
else
- base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ base_bits = mask_bits =
+ GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
}
*base = (csbase & base_bits) << addr_shift;
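
Most of the mechanical churn in this hunk (and the rest of the patch) is the move from the driver's old GENMASK(lo, hi) helper to the generic 64-bit GENMASK_ULL(hi, lo); note the reversed argument order. A hedged sketch of the semantics, with a macro equivalent to the kernel's definition and the pre-revF K8 base_bits from above (the csbase register value is made up):

#include <stdio.h>
#include <stdint.h>

/* equivalent to the kernel's GENMASK_ULL(h, l): bits h..l set, 64-bit wide */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* base_bits for pre-revF K8 in get_cs_base_and_mask(): bits 31:21 and 15:9 */
	uint64_t base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
	uint64_t csbase = 0x00200a00;	/* made-up DCSB register value */

	printf("base_bits = 0x%016llx\n",
	       (unsigned long long)base_bits);		/* 0x00000000ffe0fe00 */

	/* csbase bits are shifted into a byte address by addr_shift (4 here) */
	printf("base      = 0x%016llx\n",
	       (unsigned long long)((csbase & base_bits) << 4));
	return 0;
}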
@@ -393,15 +415,15 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
- debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
- (unsigned long)input_addr, csrow,
- pvt->mc_node_id);
+ edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
+ (unsigned long)input_addr, csrow,
+ pvt->mc_node_id);
return csrow;
}
}
- debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
- (unsigned long)input_addr, pvt->mc_node_id);
+ edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+ (unsigned long)input_addr, pvt->mc_node_id);
return -1;
}
@@ -426,24 +448,23 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 base;
/* only revE and later have the DRAM Hole Address Register */
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
- debugf1(" revision %d for node %d does not support DHAR\n",
- pvt->ext_model, pvt->mc_node_id);
+ if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
+ edac_dbg(1, " revision %d for node %d does not support DHAR\n",
+ pvt->ext_model, pvt->mc_node_id);
return 1;
}
/* valid for Fam10h and above */
- if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
+ if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
+ edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
if (!dhar_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
- pvt->mc_node_id);
+ edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
+ pvt->mc_node_id);
return 1;
}
@@ -465,19 +486,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
* addresses in the hole so that they start at 0x100000000.
*/
- base = dhar_base(pvt);
-
- *hole_base = base;
- *hole_size = (0x1ull << 32) - base;
+ *hole_base = dhar_base(pvt);
+ *hole_size = (1ULL << 32) - *hole_base;
- if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt);
- else
- *hole_offset = k8_dhar_offset(pvt);
+ *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
+ : k8_dhar_offset(pvt);
- debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
- pvt->mc_node_id, (unsigned long)*hole_base,
- (unsigned long)*hole_offset, (unsigned long)*hole_size);
+ edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)*hole_base,
+ (unsigned long)*hole_offset, (unsigned long)*hole_size);
return 0;
}
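
For context on the DHAR arithmetic above: *hole_base is the DHAR-programmed base below 4 GB, *hole_size is whatever remains up to 4 GB, and *hole_offset is the amount later subtracted from a SysAddr that falls in the hoisted region to recover the DramAddr. A small standalone sketch with made-up values standing in for the driver's dhar_base()/f10_dhar_offset() accessors:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values as dhar_base()/f10_dhar_offset() might return them */
	uint64_t hole_base   = 0xc0000000ULL;			/* 3 GB */
	uint64_t hole_size   = (1ULL << 32) - hole_base;	/* up to 4 GB */
	uint64_t hole_offset = 0x40000000ULL;			/* hoist distance */

	/* a SysAddr just above 4 GB lands in the hoisted region ... */
	uint64_t sys_addr = (1ULL << 32) + 0x1000;

	if (sys_addr >= (1ULL << 32) && sys_addr < (1ULL << 32) + hole_size) {
		/* ... and translates by subtracting the hole offset,
		 * exactly like sys_addr_to_dram_addr() does */
		printf("DramAddr = 0x%llx\n",
		       (unsigned long long)(sys_addr - hole_offset));
	}

	printf("hole: base 0x%llx size 0x%llx\n",
	       (unsigned long long)hole_base, (unsigned long long)hole_size);
	return 0;
}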
@@ -516,22 +533,21 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
- int ret = 0;
+ int ret;
dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
if (!ret) {
- if ((sys_addr >= (1ull << 32)) &&
- (sys_addr < ((1ull << 32) + hole_size))) {
+ if ((sys_addr >= (1ULL << 32)) &&
+ (sys_addr < ((1ULL << 32) + hole_size))) {
/* use DHAR to translate SysAddr to DramAddr */
dram_addr = sys_addr - hole_offset;
- debugf2("using DHAR to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n",
- (unsigned long)sys_addr,
- (unsigned long)dram_addr);
+ edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
+ (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
return dram_addr;
}
@@ -546,11 +562,10 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
+ dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
- debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n", (unsigned long)sys_addr,
- (unsigned long)dram_addr);
+ edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)dram_addr);
return dram_addr;
}
@@ -583,12 +598,12 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* concerning translating a DramAddr to an InputAddr.
*/
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
(dram_addr & 0xfff);
- debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
- intlv_shift, (unsigned long)dram_addr,
- (unsigned long)input_addr);
+ edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+ intlv_shift, (unsigned long)dram_addr,
+ (unsigned long)input_addr);
return input_addr;
}
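
On the interleave math in dram_addr_to_input_addr(): with 2^intlv_shift nodes interleaved at 4 KB granularity, the node-select bits sit just above bit 11 of the DramAddr and are squeezed out to form the InputAddr. A standalone sketch of that squeeze and of its inverse (which the soon-to-be-removed input_addr_to_dram_addr() below performed):

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* drop the node-interleave bits sitting above the 4 KB offset */
static uint64_t dram_to_input(uint64_t dram_addr, unsigned intlv_shift)
{
	return ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) |
	       (dram_addr & 0xfff);
}

/* reinsert them, selecting this node via intlv_sel (inverse direction) */
static uint64_t input_to_dram(uint64_t input_addr, unsigned intlv_shift,
			      uint32_t intlv_sel)
{
	return (((input_addr & GENMASK_ULL(35, 12)) << intlv_shift) |
		(input_addr & 0xfff)) | ((uint64_t)intlv_sel << 12);
}

int main(void)
{
	uint64_t dram = 0x12345678;	/* arbitrary example address */
	unsigned shift = 1;		/* 2-node interleave */
	uint64_t in = dram_to_input(dram, shift);
	uint32_t sel = (dram >> 12) & 1;

	printf("dram 0x%llx -> input 0x%llx -> dram 0x%llx\n",
	       (unsigned long long)dram, (unsigned long long)in,
	       (unsigned long long)input_to_dram(in, shift, sel));
	return 0;
}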
@@ -604,142 +619,18 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
input_addr =
dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
- debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
- (unsigned long)sys_addr, (unsigned long)input_addr);
+ edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)input_addr);
return input_addr;
}
-
-/*
- * @input_addr is an InputAddr associated with the node represented by mci.
- * Translate @input_addr to a DramAddr and return the result.
- */
-static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
-{
- struct amd64_pvt *pvt;
- unsigned node_id, intlv_shift;
- u64 bits, dram_addr;
- u32 intlv_sel;
-
- /*
- * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
- * shows how to translate a DramAddr to an InputAddr. Here we reverse
- * this procedure. When translating from a DramAddr to an InputAddr, the
- * bits used for node interleaving are discarded. Here we recover these
- * bits from the IntlvSel field of the DRAM Limit register (section
- * 3.4.4.2) for the node that input_addr is associated with.
- */
- pvt = mci->pvt_info;
- node_id = pvt->mc_node_id;
-
- BUG_ON(node_id > 7);
-
- intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- if (intlv_shift == 0) {
- debugf1(" InputAddr 0x%lx translates to DramAddr of "
- "same value\n", (unsigned long)input_addr);
-
- return input_addr;
- }
-
- bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
- (input_addr & 0xfff);
-
- intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
- dram_addr = bits + (intlv_sel << 12);
-
- debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
- "(%d node interleave bits)\n", (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
-
- return dram_addr;
-}
-
-/*
- * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
- * @dram_addr to a SysAddr.
- */
-static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, sys_addr;
- int ret = 0;
-
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
- if (!ret) {
- if ((dram_addr >= hole_base) &&
- (dram_addr < (hole_base + hole_size))) {
- sys_addr = dram_addr + hole_offset;
-
- debugf1("using DHAR to translate DramAddr 0x%lx to "
- "SysAddr 0x%lx\n", (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
- }
- }
-
- base = get_dram_base(pvt, pvt->mc_node_id);
- sys_addr = dram_addr + base;
-
- /*
- * The sys_addr we have computed up to this point is a 40-bit value
- * because the k8 deals with 40-bit values. However, the value we are
- * supposed to return is a full 64-bit physical address. The AMD
- * x86-64 architecture specifies that the most significant implemented
- * address bit through bit 63 of a physical address must be either all
- * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
- * 64-bit value below. See section 3.4.2 of AMD publication 24592:
- * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
- * Programming.
- */
- sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
-
- debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
-}
-
-/*
- * @input_addr is an InputAddr associated with the node given by mci. Translate
- * @input_addr to a SysAddr.
- */
-static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
- u64 input_addr)
-{
- return dram_addr_to_sys_addr(mci,
- input_addr_to_dram_addr(mci, input_addr));
-}
-
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
- u64 *input_addr_min, u64 *input_addr_max)
-{
- struct amd64_pvt *pvt;
- u64 base, mask;
-
- pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- *input_addr_min = base & ~mask;
- *input_addr_max = base | mask;
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
- u32 *page, u32 *offset)
+ struct err_info *err)
{
- *page = (u32) (error_address >> PAGE_SHIFT);
- *offset = ((u32) error_address) & ~PAGE_MASK;
+ err->page = (u32) (error_address >> PAGE_SHIFT);
+ err->offset = ((u32) error_address) & ~PAGE_MASK;
}
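
error_address_to_page_and_offset() is a plain split of the error address at PAGE_SHIFT; the only functional change is that the result now lands in the new struct err_info rather than two out-parameters. A trivially checkable sketch (struct trimmed to the two fields used here):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct err_info {			/* subset of the driver's struct */
	uint32_t page;
	uint32_t offset;
};

static void error_address_to_page_and_offset(uint64_t addr, struct err_info *err)
{
	err->page   = (uint32_t)(addr >> PAGE_SHIFT);
	err->offset = ((uint32_t)addr) & ~PAGE_MASK;
}

int main(void)
{
	struct err_info err;

	error_address_to_page_and_offset(0x12345678ULL, &err);
	printf("page 0x%x offset 0x%x\n", err.page, err.offset);  /* 0x12345, 0x678 */
	return 0;
}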
/*
@@ -768,12 +659,12 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
-static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
u8 bit;
unsigned long edac_cap = EDAC_FLAG_NONE;
- bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
+ bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
? 19
: 17;
@@ -783,77 +674,79 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
+static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
-static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
- debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+ edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
- debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
- (dclr & BIT(16)) ? "un" : "",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+ (dclr & BIT(16)) ? "un" : "",
+ (dclr & BIT(19)) ? "yes" : "no");
- debugf1(" PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ edac_dbg(1, " PAR/ERR parity: %s\n",
+ (dclr & BIT(8)) ? "enabled" : "disabled");
- if (boot_cpu_data.x86 == 0x10)
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ if (pvt->fam == 0x10)
+ edac_dbg(1, " DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
- debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+ (dclr & BIT(12)) ? "yes" : "no",
+ (dclr & BIT(13)) ? "yes" : "no",
+ (dclr & BIT(14)) ? "yes" : "no",
+ (dclr & BIT(15)) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
- debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
+ edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
- debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ edac_dbg(1, " NB two channel DRAM capable: %s\n",
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
- debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
- amd64_dump_dramcfg_low(pvt->dclr0, 0);
+ debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
- debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
+ edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
- debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
- "offset: 0x%08x\n",
- pvt->dhar, dhar_base(pvt),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
- : f10_dhar_offset(pvt));
+ edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
+ pvt->dhar, dhar_base(pvt),
+ (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(pvt, 0);
+ debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
- if (boot_cpu_data.x86 == 0xf)
+ if (pvt->fam == 0xf)
return;
- amd64_debug_display_dimm_sizes(pvt, 1);
+ debug_display_dimm_sizes(pvt, 1);
amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
- amd64_dump_dramcfg_low(pvt->dclr1, 1);
+ debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
- * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
+ * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
static void prep_chip_selects(struct amd64_pvt *pvt)
{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
+ } else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
@@ -876,15 +769,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
u32 *base1 = &pvt->csels[1].csbases[cs];
if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
- debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
- if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, reg1);
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
for_each_chip_select_mask(cs, 0, pvt) {
@@ -894,26 +787,26 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
u32 *mask1 = &pvt->csels[1].csmasks[cs];
if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
- debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
- if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, reg1);
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
+static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
/* F15h supports only DDR3 */
- if (boot_cpu_data.x86 >= 0x15)
+ if (pvt->fam >= 0x15)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
- else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
+ else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -946,30 +839,30 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
-static u64 get_error_address(struct mce *m)
+static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
u64 addr;
u8 start_bit = 1;
u8 end_bit = 47;
- if (c->x86 == 0xf) {
+ if (pvt->fam == 0xf) {
start_bit = 3;
end_bit = 39;
}
- addr = m->addr & GENMASK(start_bit, end_bit);
+ addr = m->addr & GENMASK_ULL(end_bit, start_bit);
/*
* Erratum 637 workaround
*/
- if (c->x86 == 0x15) {
+ if (pvt->fam == 0x15) {
struct amd64_pvt *pvt;
u64 cc6_base, tmp_addr;
u32 tmp;
- u8 mce_nid, intlv_en;
+ u16 mce_nid;
+ u8 intlv_en;
- if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
+ if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
return addr;
mce_nid = amd_get_nb_id(m->extcpu);
@@ -979,7 +872,7 @@ static u64 get_error_address(struct mce *m)
intlv_en = tmp >> 21 & 0x7;
/* add [47:27] + 3 trailing bits */
- cc6_base = (tmp & GENMASK(0, 20)) << 3;
+ cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
/* reverse and add DramIntlvEn */
cc6_base |= intlv_en ^ 0x7;
@@ -988,18 +881,18 @@ static u64 get_error_address(struct mce *m)
cc6_base <<= 24;
if (!intlv_en)
- return cc6_base | (addr & GENMASK(0, 23));
+ return cc6_base | (addr & GENMASK_ULL(23, 0));
amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
/* faster log2 */
- tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
+ tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
/* OR DramIntlvSel into bits [14:12] */
- tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
+ tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
/* add remaining [11:0] bits from original MC4_ADDR */
- tmp_addr |= addr & GENMASK(0, 11);
+ tmp_addr |= addr & GENMASK_ULL(11, 0);
return cc6_base | tmp_addr;
}
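
On the valid error-address bits handled above: MC4_ADDR carries the error address in bits [47:1] on F10h and later but only [39:3] on K8, which is why get_error_address() adjusts start_bit/end_bit before masking; the family 0x15 branch then additionally rewrites addresses that hit the CC6 save area (erratum 637). A sketch of just the basic extraction, with the erratum handling left out (fam is a plain parameter here, not read from the hardware):

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t error_address(uint64_t mc4_addr, unsigned fam)
{
	unsigned start_bit = 1, end_bit = 47;	/* F10h+: ErrAddr is [47:1] */

	if (fam == 0xf) {			/* K8: only [39:3] are valid */
		start_bit = 3;
		end_bit = 39;
	}

	return mc4_addr & GENMASK_ULL(end_bit, start_bit);
}

int main(void)
{
	uint64_t raw = 0xffffabcd12345677ULL;	/* fabricated MC4_ADDR value */

	printf("F10h+: 0x%llx\n", (unsigned long long)error_address(raw, 0x10));
	printf("K8:    0x%llx\n", (unsigned long long)error_address(raw, 0x0f));
	return 0;
}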
@@ -1007,15 +900,34 @@ static u64 get_error_address(struct mce *m)
return addr;
}
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *related)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(vendor, device, dev))) {
+ if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+ (dev->bus->number == related->bus->number) &&
+ (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+ break;
+ }
+
+ return dev;
+}
+
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct amd_northbridge *nb;
+ struct pci_dev *f1 = NULL;
+ unsigned int pci_func;
int off = range << 3;
+ u32 llim;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- if (c->x86 == 0xf)
+ if (pvt->fam == 0xf)
return;
if (!dram_rw(pvt, range))
@@ -1024,52 +936,75 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
- /* Factor in CC6 save area by reading dst node's limit reg */
- if (c->x86 == 0x15) {
- struct pci_dev *f1 = NULL;
- u8 nid = dram_dst_node(pvt, range);
- u32 llim;
+ /* F15h: factor in CC6 save area by reading dst node's limit reg */
+ if (pvt->fam != 0x15)
+ return;
- f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
- if (WARN_ON(!f1))
- return;
+ nb = node_to_amd_nb(dram_dst_node(pvt, range));
+ if (WARN_ON(!nb))
+ return;
- amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+ pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
+ : PCI_DEVICE_ID_AMD_15H_NB_F1;
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+ f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
+ if (WARN_ON(!f1))
+ return;
- /* {[39:27],111b} */
- pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+ amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+ pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
- /* [47:40] */
- pvt->ranges[range].lim.hi |= llim >> 13;
+ /* {[39:27],111b} */
+ pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
- pci_dev_put(f1);
- }
+ pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
+
+ /* [47:40] */
+ pvt->ranges[range].lim.hi |= llim >> 13;
+
+ pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
- u16 syndrome)
+ struct err_info *err)
{
- struct mem_ctl_info *src_mci;
struct amd64_pvt *pvt = mci->pvt_info;
- int channel, csrow;
- u32 page, offset;
+
+ error_address_to_page_and_offset(sys_addr, err);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
+ if (!err->src_mci) {
+ amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+ (unsigned long)sys_addr);
+ err->err_code = ERR_NODE;
+ return;
+ }
+
+ /* Now map the sys_addr to a CSROW */
+ err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
+ if (err->csrow < 0) {
+ err->err_code = ERR_CSROW;
+ return;
+ }
/* CHIPKILL enabled */
if (pvt->nbcfg & NBCFG_CHIPKILL) {
- channel = get_channel_from_ecc_syndrome(mci, syndrome);
- if (channel < 0) {
+ err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
+ if (err->channel < 0) {
/*
* Syndrome didn't map, so we don't know which of the
* 2 DIMMs is in error. So we need to ID 'both' of them
* as suspect.
*/
- amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
- "error reporting race\n", syndrome);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
+ "possible error reporting race\n",
+ err->syndrome);
+ err->err_code = ERR_CHANNEL;
return;
}
} else {
@@ -1081,30 +1016,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* was obtained from email communication with someone at AMD.
* (Wish the email was placed in this comment - norsk)
*/
- channel = ((sys_addr & BIT(3)) != 0);
- }
-
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- /* Now map the sys_addr to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, sys_addr);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
- channel, EDAC_MOD_STR);
+ err->channel = ((sys_addr & BIT(3)) != 0);
}
}
@@ -1132,12 +1044,36 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
else if (pvt->ext_model >= K8_REV_D) {
+ unsigned diff;
WARN_ON(cs_mode > 10);
- if (cs_mode == 3 || cs_mode == 8)
- return 32 << (cs_mode - 1);
- else
- return 32 << cs_mode;
+ /*
+ * the below calculation, besides trying to win an obfuscated C
+ * contest, maps cs_mode values to DIMM chip select sizes. The
+ * mappings are:
+ *
+ * cs_mode CS size (mb)
+ * ======= ============
+ * 0 32
+ * 1 64
+ * 2 128
+ * 3 128
+ * 4 256
+ * 5 512
+ * 6 256
+ * 7 512
+ * 8 1024
+ * 9 1024
+ * 10 2048
+ *
+ * Basically, it calculates a value with which to shift the
+ * smallest CS size of 32MB.
+ *
+ * ddr[23]_cs_size have a similar purpose.
+ */
+ diff = cs_mode/3 + (unsigned)(cs_mode > 5);
+
+ return 32 << (cs_mode - diff);
}
else {
WARN_ON(cs_mode > 6);
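
The new comment documents what the one-liner computes; it is easy to verify that diff = cs_mode/3 + (cs_mode > 5) reproduces the whole table. A throwaway verification sketch:

#include <stdio.h>

/* K8 rev D/E mapping from the comment above: size = 32MB << (cs_mode - diff) */
static int revd_cs_size_mb(unsigned cs_mode)
{
	unsigned diff = cs_mode / 3 + (unsigned)(cs_mode > 5);

	return 32 << (cs_mode - diff);
}

int main(void)
{
	/* expected column from the comment: 32 64 128 128 256 512 256 512 1024 1024 2048 */
	for (unsigned cs_mode = 0; cs_mode <= 10; cs_mode++)
		printf("cs_mode %2u -> %4d MB\n", cs_mode, revd_cs_size_mb(cs_mode));
	return 0;
}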
@@ -1158,7 +1094,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
int i, j, channels = 0;
/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
- if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
return 2;
/*
@@ -1169,7 +1105,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
* Need to check DCT0[0] and DCT1[0] to see if only one of them has
* their CSEnable bit on. If so, then SINGLE DIMM case.
*/
- debugf0("Data width is not 128 bits - need more decoding\n");
+ edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
/*
* Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1241,38 +1177,83 @@ static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
+/*
+ * F16h and F15h model 30h have only limited cs_modes.
+ */
+static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
+{
+ WARN_ON(cs_mode > 12);
+
+ if (cs_mode == 6 || cs_mode == 8 ||
+ cs_mode == 9 || cs_mode == 12)
+ return -1;
+ else
+ return ddr3_cs_size(cs_mode, false);
+}
+
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (boot_cpu_data.x86 == 0xf)
+ if (pvt->fam == 0xf)
return;
if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
- debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
- pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
+ edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCTs operate in %s mode.\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
+ edac_dbg(0, " DCTs operate in %s mode\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
- debugf0(" Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ edac_dbg(0, " Address range split per DCT: %s\n",
+ (dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" data interleave for ECC: %s, "
- "DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
+ (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
+ (dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" channel interleave: %s, "
- "interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
- dct_sel_interleave_addr(pvt));
+ edac_dbg(0, " channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
+ (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ dct_sel_interleave_addr(pvt));
}
amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
+ * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
+ * 2.10.12 Memory Interleaving Modes).
+ */
+static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ u8 intlv_en, int num_dcts_intlv,
+ u32 dct_sel)
+{
+ u8 channel = 0;
+ u8 select;
+
+ if (!(intlv_en))
+ return (u8)(dct_sel);
+
+ if (num_dcts_intlv == 2) {
+ select = (sys_addr >> 8) & 0x3;
+ channel = select ? 0x3 : 0;
+ } else if (num_dcts_intlv == 4) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
+ switch (intlv_addr) {
+ case 0x4:
+ channel = (sys_addr >> 8) & 0x3;
+ break;
+ case 0x5:
+ channel = (sys_addr >> 9) & 0x3;
+ break;
+ }
+ }
+ return channel;
+}
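
f15_m30h_determine_channel() above selects the DCT from low SysAddr bits chosen by DctSelIntLvAddr: with two DCTs interleaved, bits [9:8] pick DCT 0 or its alias DCT 3; with four, either [9:8] or [10:9] index all four. A standalone restatement of exactly that selection (dct_sel_interleave_addr() replaced by a plain parameter, inputs illustrative):

#include <stdio.h>
#include <stdint.h>

static uint8_t m30h_channel(uint64_t sys_addr, uint8_t intlv_en,
			    int num_dcts_intlv, uint32_t dct_sel,
			    uint8_t intlv_addr)
{
	uint8_t channel = 0;

	if (!intlv_en)
		return (uint8_t)dct_sel;	/* no interleave: fixed DCT */

	if (num_dcts_intlv == 2) {
		uint8_t select = (sys_addr >> 8) & 0x3;

		channel = select ? 0x3 : 0;	/* DCT0 or its alias DCT3 */
	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			channel = (sys_addr >> 8) & 0x3;
		else if (intlv_addr == 0x5)
			channel = (sys_addr >> 9) & 0x3;
	}
	return channel;
}

int main(void)
{
	printf("%u\n", m30h_channel(0x100, 0x3, 2, 0, 0x4));	/* 2-way: -> 3 */
	printf("%u\n", m30h_channel(0x600, 0xf, 4, 0, 0x5));	/* 4-way: -> 3 */
	return 0;
}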
+
+/*
* Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
@@ -1314,7 +1295,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
}
/* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 sys_addr, bool hi_rng,
u32 dct_sel_base_addr)
{
@@ -1358,7 +1339,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
chan_off = dram_base;
}
- return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
+ return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}
/*
@@ -1390,7 +1371,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -1404,7 +1385,7 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
pvt = mci->pvt_info;
- debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
+ edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
for_each_chip_select(csrow, dct, pvt) {
if (!csrow_enabled(csrow, dct, pvt))
@@ -1412,19 +1393,22 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
- csrow, cs_base, cs_mask);
+ edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
cs_mask = ~cs_mask;
- debugf1(" (InputAddr & ~CSMask)=0x%llx "
- "(CSBase & ~CSMask)=0x%llx\n",
- (in_addr & cs_mask), (cs_base & cs_mask));
+ edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ cs_found = csrow;
+ break;
+ }
cs_found = f10_process_possible_spare(pvt, dct, csrow);
- debugf1(" MATCH csrow=%d\n", cs_found);
+ edac_dbg(1, " MATCH csrow=%d\n", cs_found);
break;
}
}
@@ -1440,11 +1424,9 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- if (boot_cpu_data.x86 == 0x10) {
+ if (pvt->fam == 0x10) {
/* only revC3 and revE have that feature */
- if (boot_cpu_data.x86_model < 4 ||
- (boot_cpu_data.x86_model < 0xa &&
- boot_cpu_data.x86_mask < 3))
+ if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
return sys_addr;
}
@@ -1469,7 +1451,7 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
- u64 sys_addr, int *nid, int *chan_sel)
+ u64 sys_addr, int *chan_sel)
{
int cs_found = -EINVAL;
u64 chan_addr;
@@ -1481,8 +1463,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
u8 intlv_en = dram_intlv_en(pvt, range);
u32 intlv_sel = dram_intlv_sel(pvt, range);
- debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
- range, sys_addr, get_dram_limit(pvt, range));
+ edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
if (dhar_valid(pvt) &&
dhar_base(pvt) <= sys_addr &&
@@ -1538,34 +1520,155 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
(chan_addr & 0xfff);
}
- debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
+ edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
- if (cs_found >= 0) {
- *nid = node_id;
+ if (cs_found >= 0)
*chan_sel = channel;
+
+ return cs_found;
+}
+
+static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ int num_dcts_intlv = 0;
+ u64 chan_addr, chan_offset;
+ u64 dct_base, dct_limit;
+ u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
+ u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
+
+ u64 dhar_offset = f10_dhar_offset(pvt);
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
+
+ dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
+ dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
+
+ edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+ if (!(get_dram_base(pvt, range) <= sys_addr) &&
+ !(get_dram_limit(pvt, range) >= sys_addr))
+ return -EINVAL;
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
+
+ /* Verify sys_addr is within DCT Range. */
+ dct_base = (u64) dct_sel_baseaddr(pvt);
+ dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
+
+ if (!(dct_cont_base_reg & BIT(0)) &&
+ !(dct_base <= (sys_addr >> 27) &&
+ dct_limit >= (sys_addr >> 27)))
+ return -EINVAL;
+
+ /* Verify number of dct's that participate in channel interleaving. */
+ num_dcts_intlv = (int) hweight8(intlv_en);
+
+ if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
+ return -EINVAL;
+
+ channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
+ num_dcts_intlv, dct_sel);
+
+ /* Verify we stay within the MAX number of channels allowed */
+ if (channel > 3)
+ return -EINVAL;
+
+ leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
+
+ /* Get normalized DCT addr */
+ if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
+ chan_offset = dhar_offset;
+ else
+ chan_offset = dct_base << 27;
+
+ chan_addr = sys_addr - chan_offset;
+
+ /* remove channel interleave */
+ if (num_dcts_intlv == 2) {
+ if (intlv_addr == 0x4)
+ chan_addr = ((chan_addr >> 9) << 8) |
+ (chan_addr & 0xff);
+ else if (intlv_addr == 0x5)
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ return -EINVAL;
+
+ } else if (num_dcts_intlv == 4) {
+ if (intlv_addr == 0x4)
+ chan_addr = ((chan_addr >> 10) << 8) |
+ (chan_addr & 0xff);
+ else if (intlv_addr == 0x5)
+ chan_addr = ((chan_addr >> 11) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ return -EINVAL;
}
+
+ if (dct_offset_en) {
+ amd64_read_pci_cfg(pvt->F1,
+ DRAM_CONT_HIGH_OFF + (int) channel * 4,
+ &tmp);
+ chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
+ }
+
+ f15h_select_dct(pvt, channel);
+
+ edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
+
+ /*
+ * Find Chip select:
+ * if channel = 3, then alias it to 1. This is because, in F15 M30h,
+ * there is support for 4 DCT's, but only 2 are currently functional.
+ * They are DCT0 and DCT3. But we have read all registers of DCT3 into
+ * pvt->csels[1]. So we need to use '1' here to get correct info.
+ * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
+ */
+ alias_channel = (channel == 3) ? 1 : channel;
+
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
+
+ if (cs_found >= 0)
+ *chan_sel = alias_channel;
+
return cs_found;
}
-static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
- int *node, int *chan_sel)
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
+ u64 sys_addr,
+ int *chan_sel)
{
int cs_found = -EINVAL;
unsigned range;
for (range = 0; range < DRAM_RANGES; range++) {
-
if (!dram_rw(pvt, range))
continue;
- if ((get_dram_base(pvt, range) <= sys_addr) &&
- (get_dram_limit(pvt, range) >= sys_addr)) {
+ if (pvt->fam == 0x15 && pvt->model >= 0x30)
+ cs_found = f15_m30h_match_to_this_node(pvt, range,
+ sys_addr,
+ chan_sel);
+ else if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
cs_found = f1x_match_to_this_node(pvt, range,
- sys_addr, node,
- chan_sel);
+ sys_addr, chan_sel);
if (cs_found >= 0)
break;
}
@@ -1581,55 +1684,38 @@ static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
* (MCX_ADDR).
*/
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
- u16 syndrome)
+ struct err_info *err)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u32 page, offset;
- int nid, csrow, chan = 0;
- csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+ error_address_to_page_and_offset(sys_addr, err);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
+ if (err->csrow < 0) {
+ err->err_code = ERR_CSROW;
return;
}
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
if (dct_ganging_enabled(pvt))
- chan = get_channel_from_ecc_syndrome(mci, syndrome);
-
- if (chan >= 0)
- edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
- EDAC_MOD_STR);
- else
- /*
- * Channel unknown, report all channels on this CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
+ err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
* debug routine to display the memory sizes of all logical DIMMs and its
* CSROWs
*/
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
- int dimm, size0, size1, factor = 0;
+ int dimm, size0, size1;
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
- if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & WIDTH_128)
- factor = 1;
-
+ if (pvt->fam == 0xf) {
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
@@ -1641,7 +1727,8 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
: pvt->csels[0].csbases;
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
+ edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
@@ -1659,12 +1746,12 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
DBAM_DIMM(dimm, dbam));
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
- dimm * 2, size0 << factor,
- dimm * 2 + 1, size1 << factor);
+ dimm * 2, size0,
+ dimm * 2 + 1, size1);
}
}
-static struct amd64_family_type amd64_family_types[] = {
+static struct amd64_family_type family_types[] = {
[K8_CPUS] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
@@ -1698,25 +1785,41 @@ static struct amd64_family_type amd64_family_types[] = {
.read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
+ [F15_M30H_CPUS] = {
+ .ctl_name = "F15h_M30h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
+ }
+ },
+ [F16_CPUS] = {
+ .ctl_name = "F16h",
+ .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
+ [F16_M30H_CPUS] = {
+ .ctl_name = "F16h_M30h",
+ .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
};
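
The new entries above wire F15h M30h and the F16h parts into the existing ops-table scheme: each family carries its function 1/3 PCI IDs plus function pointers for channel counting, address decode and DBAM decoding. The family/model to table-index selection happens elsewhere in the driver and is not part of this hunk; the following is only a generic sketch of that kind of dispatch, with made-up names and dummy callbacks:

#include <stdio.h>

struct fam_ops {
	int (*dbam_to_cs)(unsigned cs_mode);
};

struct fam_type {
	const char *ctl_name;
	struct fam_ops ops;
};

static int f15_dbam_to_cs(unsigned cs_mode) { return 32 << cs_mode; }	/* dummy */
static int f16_dbam_to_cs(unsigned cs_mode) { return 16 << cs_mode; }	/* dummy */

static struct fam_type family_types[] = {
	{ .ctl_name = "F15h",      .ops = { .dbam_to_cs = f15_dbam_to_cs } },
	{ .ctl_name = "F15h_M30h", .ops = { .dbam_to_cs = f16_dbam_to_cs } },
	{ .ctl_name = "F16h",      .ops = { .dbam_to_cs = f16_dbam_to_cs } },
};

/* illustrative family/model -> table entry selection */
static struct fam_type *pick(unsigned fam, unsigned model)
{
	if (fam == 0x15)
		return model >= 0x30 ? &family_types[1] : &family_types[0];
	return &family_types[2];
}

int main(void)
{
	struct fam_type *t = pick(0x15, 0x30);

	printf("%s: %d\n", t->ctl_name, t->ops.dbam_to_cs(2));
	return 0;
}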
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
- unsigned int device,
- struct pci_dev *related)
-{
- struct pci_dev *dev = NULL;
-
- dev = pci_get_device(vendor, device, dev);
- while (dev) {
- if ((dev->bus->number == related->bus->number) &&
- (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
- break;
- dev = pci_get_device(vendor, device, dev);
- }
-
- return dev;
-}
-
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
@@ -1724,7 +1827,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
*
* Algorithm courtesy of Ross LaFetra from AMD.
*/
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
0x2f57, 0x1afe, 0x66cc, 0xdd88,
0x11eb, 0x3396, 0x7f4c, 0xeac8,
0x0001, 0x0002, 0x0004, 0x0008,
@@ -1763,7 +1866,7 @@ static u16 x4_vectors[] = {
0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1785,7 +1888,7 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
unsigned v_dim)
{
unsigned int i, err_sym;
@@ -1817,7 +1920,7 @@ static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
}
}
- debugf0("syndrome(%x) not found\n", syndrome);
+ edac_dbg(0, "syndrome(%x) not found\n", syndrome);
return -1;
}
@@ -1881,82 +1984,56 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
-/*
- * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
- * ADDRESS and process.
- */
-static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 sys_addr;
- u16 syndrome;
-
- /* Ensure that the Error Address is VALID */
- if (!(m->status & MCI_STATUS_ADDRV)) {
- amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- sys_addr = get_error_address(m);
- syndrome = extract_syndrome(m->status);
-
- amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
-
- pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
-}
-
-/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
+static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
+ u8 ecc_type)
{
- struct mem_ctl_info *log_mci, *src_mci = NULL;
- int csrow;
- u64 sys_addr;
- u32 page, offset;
-
- log_mci = mci;
+ enum hw_event_mc_err_type err_type;
+ const char *string;
- if (!(m->status & MCI_STATUS_ADDRV)) {
- amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ if (ecc_type == 2)
+ err_type = HW_EVENT_ERR_CORRECTED;
+ else if (ecc_type == 1)
+ err_type = HW_EVENT_ERR_UNCORRECTED;
+ else {
+ WARN(1, "Something is rotten in the state of Denmark.\n");
return;
}
- sys_addr = get_error_address(m);
-
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
- return;
+ switch (err->err_code) {
+ case DECODE_OK:
+ string = "";
+ break;
+ case ERR_NODE:
+ string = "Failed to map error addr to a node";
+ break;
+ case ERR_CSROW:
+ string = "Failed to map error addr to a csrow";
+ break;
+ case ERR_CHANNEL:
+ string = "unknown syndrome - possible error reporting race";
+ break;
+ default:
+ string = "WTF error";
+ break;
}
- log_mci = src_mci;
-
- csrow = sys_addr_to_csrow(log_mci, sys_addr);
- if (csrow < 0) {
- amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
- edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
- }
+ edac_mc_handle_error(err_type, mci, 1,
+ err->page, err->offset, err->syndrome,
+ err->csrow, err->channel, -1,
+ string, "");
}
-static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct mce *m)
+static inline void decode_bus_error(int node_id, struct mce *m)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, 0x1f);
+ struct mem_ctl_info *mci = mcis[node_id];
+ struct amd64_pvt *pvt = mci->pvt_info;
u8 ecc_type = (m->status >> 45) & 0x3;
+ u8 xec = XEC(m->status, 0x1f);
+ u16 ec = EC(m->status);
+ u64 sys_addr;
+ struct err_info err;
- /* Bail early out if this was an 'observed' error */
+ /* Bail out early if this was an 'observed' error */
if (PP(ec) == NBSL_PP_OBS)
return;
@@ -1964,15 +2041,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
if (xec && xec != F10_NBSL_EXT_ERR_ECC)
return;
+ memset(&err, 0, sizeof(err));
+
+ sys_addr = get_error_address(pvt, m);
+
if (ecc_type == 2)
- amd64_handle_ce(mci, m);
- else if (ecc_type == 1)
- amd64_handle_ue(mci, m);
-}
+ err.syndrome = extract_syndrome(m->status);
-void amd64_decode_bus_error(int node_id, struct mce *m)
-{
- __amd64_decode_bus_error(mcis[node_id], m);
+ pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
+
+ __log_bus_error(mci, &err, ecc_type);
}
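
decode_bus_error() classifies the error straight from MC4_STATUS: bits [46:45] give ecc_type (2 = correctable, 1 = uncorrectable), and the syndrome is extracted only for correctable errors before the per-family map_sysaddr_to_csrow() hook fills in struct err_info. A minimal sketch of just that classification step (status words fabricated, syndrome handling elided):

#include <stdio.h>
#include <stdint.h>

enum err_type { ERR_NONE, ERR_UNCORRECTED, ERR_CORRECTED };

/* mirror of the ecc_type test in decode_bus_error()/__log_bus_error() */
static enum err_type classify(uint64_t mc4_status)
{
	uint8_t ecc_type = (mc4_status >> 45) & 0x3;

	if (ecc_type == 2)
		return ERR_CORRECTED;
	if (ecc_type == 1)
		return ERR_UNCORRECTED;
	return ERR_NONE;	/* not an ECC error this path handles */
}

int main(void)
{
	uint64_t ce = 2ULL << 45;	/* fabricated status word, CE flavour */
	uint64_t ue = 1ULL << 45;	/* fabricated status word, UE flavour */

	printf("%d %d\n", classify(ce), classify(ue));	/* 2 1 */
	return 0;
}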
/*
@@ -2002,9 +2080,9 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
return -ENODEV;
}
- debugf1("F1: %s\n", pci_name(pvt->F1));
- debugf1("F2: %s\n", pci_name(pvt->F2));
- debugf1("F3: %s\n", pci_name(pvt->F3));
+ edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
+ edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
+ edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
return 0;
}
@@ -2021,25 +2099,24 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned range;
u64 msr_val;
u32 tmp;
- unsigned range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero
*/
rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
- debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
+ edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* check first whether TOP_MEM2 is enabled */
rdmsrl(MSR_K8_SYSCFG, msr_val);
if (msr_val & (1U << 21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
- debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
+ edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else
- debugf0(" TOP_MEM2 disabled.\n");
+ edac_dbg(0, " TOP_MEM2 disabled\n");
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
@@ -2055,17 +2132,17 @@ static void read_mc_regs(struct amd64_pvt *pvt)
if (!rw)
continue;
- debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
- range,
- get_dram_base(pvt, range),
- get_dram_limit(pvt, range));
+ edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
- debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
- dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
- (rw & 0x1) ? "R" : "-",
- (rw & 0x2) ? "W" : "-",
- dram_intlv_sel(pvt, range),
- dram_dst_node(pvt, range));
+ edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
read_dct_base_mask(pvt);
@@ -2085,12 +2162,14 @@ static void read_mc_regs(struct amd64_pvt *pvt)
pvt->ecc_sym_sz = 4;
- if (c->x86 >= 0x10) {
+ if (pvt->fam >= 0x10) {
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
- amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+ if (pvt->fam != 0x16)
+ /* F16h has only DCT0 */
+ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
/* F10h, revD and later can do x8 ECC too */
- if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
pvt->ecc_sym_sz = 8;
}
dump_misc_regs(pvt);
@@ -2130,9 +2209,11 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
+ u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
+
/*
* The math on this doesn't look right on the surface because x/2*4 can
@@ -2141,19 +2222,13 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
* number of bits to shift the DBAM register to extract the proper CSROW
* field.
*/
- cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+ cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
- /*
- * If dual channel then double the memory size of single channel.
- * Channel count is 1 or 2
- */
- nr_pages <<= (pvt->channel_count - 1);
-
- debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages= %u channel-count = %d\n",
- nr_pages, pvt->channel_count);
+ edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
+ csrow_nr, dct, cs_mode);
+ edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
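
get_csrow_nr_pages() converts a chip-select size in megabytes (as returned by the per-family dbam_to_cs() hook) into EDAC pages by shifting with (20 - PAGE_SHIFT); the cs_mode index is the 4-bit DBAM field for the DIMM, i.e. csrow_nr/2. A small sketch of that conversion, with a local stand-in for the driver's DBAM_DIMM() macro and a hard-coded size in place of dbam_to_cs():

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
/* 4-bit DBAM field for DIMM i, a stand-in for the driver's DBAM_DIMM() */
#define DBAM_DIMM(i, dbam)	(((dbam) >> ((i) * 4)) & 0xF)

int main(void)
{
	uint32_t dbam = 0x00008421;	/* made-up DBAM register value */
	int csrow_nr = 2;		/* csrows come in pairs per DIMM */
	unsigned cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	/* pretend dbam_to_cs() said this cs_mode is a 2048 MB chip select */
	unsigned size_mb = 2048;
	unsigned nr_pages = size_mb << (20 - PAGE_SHIFT);

	printf("cs_mode %u -> %u pages (%u MB)\n", cs_mode, nr_pages, size_mb);
	return 0;
}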
@@ -2164,73 +2239,80 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
*/
static int init_csrows(struct mem_ctl_info *mci)
{
- struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+ struct csrow_info *csrow;
+ struct dimm_info *dimm;
+ enum edac_type edac_mode;
+ enum mem_type mtype;
+ int i, j, empty = 1;
+ int nr_pages = 0;
u32 val;
- int i, empty = 1;
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
- debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ /*
+ * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
+ */
for_each_chip_select(i, 0, pvt) {
- csrow = &mci->csrows[i];
+ bool row_dct0 = !!csrow_enabled(i, 0, pvt);
+ bool row_dct1 = false;
+
+ if (pvt->fam != 0xf)
+ row_dct1 = !!csrow_enabled(i, 1, pvt);
- if (!csrow_enabled(i, 0, pvt)) {
- debugf1("----CSROW %d EMPTY for node %d\n", i,
- pvt->mc_node_id);
+ if (!row_dct0 && !row_dct1)
continue;
+
+ csrow = mci->csrows[i];
+ empty = 0;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, i);
+
+ if (row_dct0) {
+ nr_pages = get_csrow_nr_pages(pvt, 0, i);
+ csrow->channels[0]->dimm->nr_pages = nr_pages;
}
- debugf1("----CSROW %d VALID for MC node %d\n",
- i, pvt->mc_node_id);
+ /* K8 has only one DCT */
+ if (pvt->fam != 0xf && row_dct1) {
+ int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
- empty = 0;
- csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
- find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
- csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
- csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
-
- get_cs_base_and_mask(pvt, i, 0, &base, &mask);
- csrow->page_mask = ~mask;
- /* 8 bytes of resolution */
-
- csrow->mtype = amd64_determine_memory_type(pvt, i);
-
- debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
- (unsigned long)input_addr_min,
- (unsigned long)input_addr_max);
- debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
- (unsigned long)sys_addr, csrow->page_mask);
- debugf1(" nr_pages: %u first_page: 0x%lx "
- "last_page: 0x%lx\n",
- (unsigned)csrow->nr_pages,
- csrow->first_page, csrow->last_page);
+ csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
+ nr_pages += row_dct1_pages;
+ }
+
+ mtype = determine_memory_type(pvt, i);
+
+ edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- csrow->edac_mode =
- (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ edac_mode = EDAC_NONE;
+
+ for (j = 0; j < pvt->channel_count; j++) {
+ dimm = csrow->channels[j]->dimm;
+ dimm->mtype = mtype;
+ dimm->edac_mode = edac_mode;
+ }
}
return empty;
}
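
A standalone sketch (assuming two DCTs, eight chip selects, and a made-up csrow_enabled()) of the loop shape used above: a csrow is reported as populated when it is enabled behind either DCT, and "empty" only stays set if no csrow is enabled at all.

#include <stdbool.h>
#include <stdio.h>

#define NUM_CS 8

/* fake population: DCT0 backs csrows 0-1, DCT1 backs csrow 4 */
static bool csrow_enabled(int csrow, int dct)
{
	return (dct == 0 && csrow < 2) || (dct == 1 && csrow == 4);
}

int main(void)
{
	int i, empty = 1;

	/* walk DCT0 but peek at DCT1 in parallel, as init_csrows() does */
	for (i = 0; i < NUM_CS; i++) {
		bool row_dct0 = csrow_enabled(i, 0);
		bool row_dct1 = csrow_enabled(i, 1);

		if (!row_dct0 && !row_dct1)
			continue;

		empty = 0;
		printf("csrow %d populated (DCT0: %d, DCT1: %d)\n",
		       i, row_dct0, row_dct1);
	}
	/* mirror init_csrows(): non-zero means no csrow was found */
	return empty;
}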
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
int cpu;
@@ -2240,7 +2322,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2259,9 +2341,9 @@ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
struct msr *reg = per_cpu_ptr(msrs, cpu);
nbe = reg->l & MSR_MCGCTL_NBE;
- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ cpu, reg->q,
+ (nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
@@ -2273,7 +2355,7 @@ out:
return ret;
}
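
As a rough, userspace-only approximation of that per-node check (the MCG_CTL values are faked here; the driver reads them with rdmsr_on_cpus() over the node's cpumask): the NB machine check bank counts as enabled only when bit 4 (NBE) is set on every core of the node.

#include <stdbool.h>
#include <stdio.h>

#define MSR_MCGCTL_NBE	(1u << 4)	/* NB error reporting enable bit */

int main(void)
{
	/* pretend low 32 bits of MCG_CTL as read on each core of the node */
	unsigned int mcg_ctl[4] = { 0x1f, 0x1f, 0x0f, 0x1f };
	bool ret = true;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		bool nbe = mcg_ctl[cpu] & MSR_MCGCTL_NBE;

		printf("core: %d, MCG_CTL: 0x%x, NB MSR is %s\n",
		       cpu, mcg_ctl[cpu], nbe ? "enabled" : "disabled");
		if (!nbe) {
			ret = false;
			break;
		}
	}
	printf("NB MCE bank %s on this node\n", ret ? "enabled" : "disabled");
	return 0;
}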
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
cpumask_var_t cmask;
int cpu;
@@ -2311,7 +2393,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
return 0;
}
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
bool ret = true;
@@ -2332,8 +2414,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
@@ -2357,13 +2439,13 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
u32 value, mask = 0x3; /* UECC/CECC enable */
@@ -2402,7 +2484,7 @@ static const char *ecc_msg =
"'ecc_enable_override'.\n"
" (Note that use of the override may cause unknown side effects.)\n";
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
u32 value;
u8 ecc_en = 0;
@@ -2413,7 +2495,7 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
- nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
if (!nb_mce_en)
amd64_notice("NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
@@ -2426,26 +2508,32 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
return true;
}
-struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
- ARRAY_SIZE(amd64_inj_attrs) +
- 1];
+static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ int rc;
-struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
+ rc = amd64_create_sysfs_dbg_files(mci);
+ if (rc < 0)
+ return rc;
-static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
-{
- unsigned int i = 0, j = 0;
+ if (pvt->fam >= 0x10) {
+ rc = amd64_create_sysfs_inject_files(mci);
+ if (rc < 0)
+ return rc;
+ }
- for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
- sysfs_attrs[i] = amd64_dbg_attrs[i];
+ return 0;
+}
- if (boot_cpu_data.x86 >= 0x10)
- for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
- sysfs_attrs[i] = amd64_inj_attrs[j];
+static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
- sysfs_attrs[i] = terminator;
+ amd64_remove_sysfs_dbg_files(mci);
- mci->mc_driver_sysfs_attributes = sysfs_attrs;
+ if (pvt->fam >= 0x10)
+ amd64_remove_sysfs_inject_files(mci);
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
@@ -2462,7 +2550,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- mci->edac_cap = amd64_determine_edac_cap(pvt);
+ mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
@@ -2470,32 +2558,52 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
- mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
- mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+ mci->set_sdram_scrub_rate = set_scrub_rate;
+ mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
* returns a pointer to the family descriptor on success, NULL otherwise.
*/
-static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
- u8 fam = boot_cpu_data.x86;
struct amd64_family_type *fam_type = NULL;
- switch (fam) {
+ pvt->ext_model = boot_cpu_data.x86_model >> 4;
+ pvt->stepping = boot_cpu_data.x86_mask;
+ pvt->model = boot_cpu_data.x86_model;
+ pvt->fam = boot_cpu_data.x86;
+
+ switch (pvt->fam) {
case 0xf:
- fam_type = &amd64_family_types[K8_CPUS];
- pvt->ops = &amd64_family_types[K8_CPUS].ops;
+ fam_type = &family_types[K8_CPUS];
+ pvt->ops = &family_types[K8_CPUS].ops;
break;
case 0x10:
- fam_type = &amd64_family_types[F10_CPUS];
- pvt->ops = &amd64_family_types[F10_CPUS].ops;
+ fam_type = &family_types[F10_CPUS];
+ pvt->ops = &family_types[F10_CPUS].ops;
break;
case 0x15:
- fam_type = &amd64_family_types[F15_CPUS];
- pvt->ops = &amd64_family_types[F15_CPUS].ops;
+ if (pvt->model == 0x30) {
+ fam_type = &family_types[F15_M30H_CPUS];
+ pvt->ops = &family_types[F15_M30H_CPUS].ops;
+ break;
+ }
+
+ fam_type = &family_types[F15_CPUS];
+ pvt->ops = &family_types[F15_CPUS].ops;
+ break;
+
+ case 0x16:
+ if (pvt->model == 0x30) {
+ fam_type = &family_types[F16_M30H_CPUS];
+ pvt->ops = &family_types[F16_M30H_CPUS].ops;
+ break;
+ }
+ fam_type = &family_types[F16_CPUS];
+ pvt->ops = &family_types[F16_CPUS].ops;
break;
default:
@@ -2503,23 +2611,22 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
return NULL;
}
- pvt->ext_model = boot_cpu_data.x86_model >> 4;
-
amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (fam == 0xf ?
+ (pvt->fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
: ""), pvt->mc_node_id);
return fam_type;
}
-static int amd64_init_one_instance(struct pci_dev *F2)
+static int init_one_instance(struct pci_dev *F2)
{
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
int err = 0, ret;
- u8 nid = get_node_id(F2);
+ u16 nid = amd_get_node_id(F2);
ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2530,7 +2637,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
pvt->F2 = F2;
ret = -EINVAL;
- fam_type = amd64_per_family_init(pvt);
+ fam_type = per_family_init(pvt);
if (!fam_type)
goto err_free;
@@ -2552,31 +2659,46 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+
+ /*
+ * Always allocate two channels since we can have setups with DIMMs on
+ * only one channel. Also, this simplifies handling later for the price
+ * of a couple of KBs tops.
+ */
+ layers[1].size = 2;
+ layers[1].is_virt_csrow = false;
+
+ mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
if (!mci)
goto err_siblings;
mci->pvt_info = pvt;
- mci->dev = &pvt->F2->dev;
+ mci->pdev = &pvt->F2->dev;
setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
- set_mc_sysfs_attrs(mci);
-
ret = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf1("failed edac_mc_add_mc()\n");
+ edac_dbg(1, "failed edac_mc_add_mc()\n");
goto err_add_mc;
}
+ if (set_mc_sysfs_attrs(mci)) {
+		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");

+ goto err_add_sysfs;
+ }
/* register stuff with EDAC MCE */
if (report_gart_errors)
amd_report_gart_errors(true);
- amd_register_ecc_decoder(amd64_decode_bus_error);
+ amd_register_ecc_decoder(decode_bus_error);
mcis[nid] = mci;
@@ -2584,6 +2706,8 @@ static int amd64_init_one_instance(struct pci_dev *F2)
return 0;
+err_add_sysfs:
+ edac_mc_del_mc(mci->pdev);
err_add_mc:
edac_mc_free(mci);
@@ -2597,17 +2721,17 @@ err_ret:
return ret;
}
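
For orientation only, a small sketch of the csrow x channel layout the edac_mc_alloc() layers above describe, with example sizes (the real chip-select count comes from pvt->csels[0].b_cnt): keeping the channel layer at 2 gives every chip select a DIMM slot on each channel regardless of how the board is populated, which is what makes the later indexing simple.

#include <stdio.h>

struct layer {
	const char *name;
	int size;
};

int main(void)
{
	struct layer layers[2] = {
		{ "chip select", 8 },	/* example value for csels[0].b_cnt */
		{ "channel",     2 },	/* always 2, see comment in the patch */
	};
	int total = layers[0].size * layers[1].size;
	int cs, ch;

	printf("%d dimm slots allocated\n", total);
	/* illustrative flattening of (csrow, channel) into one dimm index */
	for (cs = 0; cs < layers[0].size; cs++)
		for (ch = 0; ch < layers[1].size; ch++)
			printf("dimm index %d -> csrow %d, channel %d\n",
			       cs * layers[1].size + ch, cs, ch);
	return 0;
}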
-static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int probe_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
{
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s;
int ret = 0;
ret = pci_enable_device(pdev);
if (ret < 0) {
- debugf0("ret=%d\n", ret);
+ edac_dbg(0, "ret=%d\n", ret);
return -EIO;
}
@@ -2630,7 +2754,7 @@ static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
goto err_enable;
}
- ret = amd64_init_one_instance(pdev);
+ ret = init_one_instance(pdev);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
restore_ecc_error_reporting(s, nid, F3);
@@ -2646,14 +2770,18 @@ err_out:
return ret;
}
-static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
+ mci = find_mci_by_dev(&pdev->dev);
+ WARN_ON(!mci);
+
+ del_mc_sysfs_attrs(mci);
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
@@ -2667,7 +2795,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
- amd_unregister_ecc_decoder(amd64_decode_bus_error);
+ amd_unregister_ecc_decoder(decode_bus_error);
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
@@ -2685,7 +2813,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
* PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver is for a given device found.
*/
-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
+static const struct pci_device_id amd64_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
@@ -2710,6 +2838,30 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_16H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
{0, }
};
@@ -2717,8 +2869,8 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
.name = EDAC_MOD_STR,
- .probe = amd64_probe_one_instance,
- .remove = __devexit_p(amd64_remove_one_instance),
+ .probe = probe_one_instance,
+ .remove = remove_one_instance,
.id_table = amd64_pci_table,
};
@@ -2727,23 +2879,18 @@ static void setup_pci_device(void)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- if (amd64_ctl_pci)
+ if (pci_ctl)
return;
mci = mcis[0];
- if (mci) {
-
- pvt = mci->pvt_info;
- amd64_ctl_pci =
- edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
-
- if (!amd64_ctl_pci) {
- pr_warning("%s(): Unable to create PCI control\n",
- __func__);
+ if (!mci)
+ return;
- pr_warning("%s(): PCI error report via EDAC not set\n",
- __func__);
- }
+ pvt = mci->pvt_info;
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ if (!pci_ctl) {
+ pr_warn("%s(): Unable to create PCI control\n", __func__);
+ pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
}
}
@@ -2799,8 +2946,8 @@ err_ret:
static void __exit amd64_edac_exit(void)
{
- if (amd64_ctl_pci)
- edac_pci_release_generic_ctl(amd64_ctl_pci);
+ if (pci_ctl)
+ edac_pci_release_generic_ctl(pci_ctl);
pci_unregister_driver(&amd64_pci_driver);