@@ -767,17 +750,25 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 		int reg1   = DCSB1 + (cs * 4);
 		u32 *base0 = &pvt->csels[0].csbases[cs];
 		u32 *base1 = &pvt->csels[1].csbases[cs];
+		u8 dct = 0;
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
+		if (!amd64_read_dct_pci_cfg(pvt, dct, reg0, base0))
 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				 cs, *base0, reg0);
 
-		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+		if (pvt->fam == 0xf) {
 			continue;
-
-		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
-			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
-				 cs, *base1, reg1);
+		} else if (pvt->fam == 0x10 && !dct_ganging_enabled(pvt)) {
+			if (!amd64_read_pci_cfg(pvt->F2, reg1, base1))
+				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
+					 cs, *base1, reg1);
+		} else {
+			dct = ((pvt->fam == 0x15) &&
+			       (pvt->model == 0x30)) ? 3 : 1;
+			if (!amd64_read_dct_pci_cfg(pvt, dct, reg0, base1))
+				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
+					 cs, *base1, reg0);
+		}
 	}
 
 	for_each_chip_select_mask(cs, 0, pvt) {
@@ -785,17 +776,25 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 		int reg1   = DCSM1 + (cs * 4);
 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
+		u8 dct = 0;
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
+		if (!amd64_read_dct_pci_cfg(pvt, dct, reg0, mask0))
 			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
 				 cs, *mask0, reg0);
 
-		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+		if (pvt->fam == 0xf) {
 			continue;
-
-		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
-			edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
-				 cs, *mask1, reg1);
+		} else if (pvt->fam == 0x10 && !dct_ganging_enabled(pvt)) {
+			if (!amd64_read_pci_cfg(pvt->F2, reg1, mask1))
+				edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
+					 cs, *mask1, reg1);
+		} else {
+			dct = ((pvt->fam == 0x15) &&
+			       (pvt->model == 0x30)) ? 3 : 1;
+			if (!amd64_read_dct_pci_cfg(pvt, dct, reg0, mask1))
+				edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
+					 cs, *mask1, reg0);
+		}