Lines matching full:dct (AMD64 EDAC driver, drivers/edac/amd64_edac.c)
100 * Select DCT to which PCI cfg accesses are routed
102 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
108 reg |= dct;
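
The fragments above are the middle of a read-modify-write on F1x10C. A minimal sketch of how the whole helper plausibly reads, assuming the driver's amd64_read_pci_cfg()/amd64_write_pci_cfg() accessors and a DCT_CFG_SEL constant for the F1x10C offset:

    static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
    {
            u32 reg = 0;

            /* Read F1x10C, clear DctCfgSel, then route subsequent F2
             * config-space accesses to the requested DCT.  F15h M30h
             * has four DCTs, so it keeps two select bits. */
            amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
            reg &= (pvt->model == 0x30) ? ~3 : ~1;
            reg |= dct;
            amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
    }
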
114 * Depending on the family, F2 DCT reads need special handling:
116 * K8: has a single DCT only and no address offsets >= 0x100
118 * F10h: each DCT has its own set of regs
122 * F16h: has only 1 DCT
124 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
126 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
131 if (dct || offset >= 0x100)
136 if (dct) {
152 * We should select which DCT we access using F1x10C[DctCfgSel]
154 dct = (dct && pvt->model == 0x30) ? 3 : dct;
155 f15h_select_dct(pvt, dct);
159 if (dct)
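
The per-family rules from the comment block map onto a switch in the accessor; the conditionals visible at source lines 131, 136, 154-155 and 159 slot into its cases. A hedged sketch, assuming pvt->fam carries the CPU family and amd64_read_pci_cfg() performs the raw F2 read (the real F10h case may carry extra handling, e.g. for DCT ganging, omitted here):

    static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
                                             int offset, u32 *val)
    {
            switch (pvt->fam) {
            case 0xf:
                    /* K8: single DCT, no registers at or above 0x100 */
                    if (dct || offset >= 0x100)
                            return -EINVAL;
                    break;
            case 0x10:
                    /* F10h: DCT1 has its own register set at +0x100 */
                    if (dct)
                            offset += 0x100;
                    break;
            case 0x15:
                    /* F15h: select the DCT via F1x10C[DctCfgSel];
                     * M30h reaches its extra DCT via select value 3 */
                    dct = (dct && pvt->model == 0x30) ? 3 : dct;
                    f15h_select_dct(pvt, dct);
                    break;
            case 0x16:
                    /* F16h: only one DCT */
                    if (dct)
                            return -EINVAL;
                    break;
            }
            return amd64_read_pci_cfg(pvt->F2, offset, val);
    }
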
369 * compute the CS base address of the @csrow on the DRAM controller @dct.
372 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
379 csbase = pvt->csels[dct].csbases[csrow];
380 csmask = pvt->csels[dct].csmasks[csrow];
391 csbase = pvt->csels[dct].csbases[csrow];
392 csmask = pvt->csels[dct].csmasks[csrow >> 1];
407 csbase = pvt->csels[dct].csbases[csrow];
408 csmask = pvt->csels[dct].csmasks[csrow >> 1];
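
The three base/mask pairs above differ only in how the mask is indexed: pre-rev-F K8 keeps one DCSM register per chip select, while later families pair two chip selects per mask, hence the csrow >> 1. A condensed sketch of that selection (field layouts, shift amounts and the F16h/F15h M30h split are omitted; K8_REV_F is the driver's revision constant):

    if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
            /* Pre-rev-F K8: one mask register per chip select */
            csbase = pvt->csels[dct].csbases[csrow];
            csmask = pvt->csels[dct].csmasks[csrow];
    } else {
            /* F10h and later: two chip selects share one mask */
            csbase = pvt->csels[dct].csbases[csrow];
            csmask = pvt->csels[dct].csmasks[csrow >> 1];
    }
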
428 #define for_each_chip_select(i, dct, pvt) \
429 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
431 #define chip_select_base(i, dct, pvt) \
432 pvt->csels[dct].csbases[i]
434 #define for_each_chip_select_mask(i, dct, pvt) \
435 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
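
These iterators keep per-DCT walks short. A hypothetical debug loop built only from macros and helpers visible in this listing (csrow_enabled() appears at source line 2489):

    int cs;

    /* Dump the base register of every enabled chip select on DCT0 */
    for_each_chip_select(cs, 0, pvt) {
            if (!csrow_enabled(cs, 0, pvt))
                    continue;
            edac_dbg(1, "CS%d base: 0x%x\n", cs,
                     chip_select_base(cs, 0, pvt));
    }
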
1397 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1433 * It's assumed all LRDIMMs in a DCT are going to be of
1448 edac_dbg(1, " DCT 128bit mode width: %s\n",
1876 * 'Rank' value on a DCT. But this is not the common case. So,
2113 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2116 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2217 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2220 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2231 * F15h supports only 64bit DCT interfaces
2233 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2242 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2246 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
2275 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
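
Each family registers its own dbam_to_chip_select() variant, but the shared shape is the same: pick the addressed DCT's DCLR register to learn the interface width, then translate the DBAM cs_mode value into a chip-select size. A generic sketch (fXX_ is a placeholder name; ddr3_cs_size() and WIDTH_128 stand for the kind of table-lookup helper and width flag these variants use, with the tables differing per family):

    static int fXX_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                                       unsigned int cs_mode, int cs_mask_nr)
    {
            /* DCLR of the addressed DCT: 64- vs 128-bit interface */
            u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

            WARN_ON(cs_mode > 11);

            /* Table lookup: cs_mode -> chip-select size in MB */
            return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
    }
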
2301 edac_dbg(0, " Address range split per DCT: %s\n",
2318 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
2349 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
2369 /* return DCT select function: 0=DCT0, 1=DCT1 */
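
The "0=DCT0, 1=DCT1" comment sits in the channel-select path: with interleaving enabled and the simplest DctSelIntLvAddr setting, a single address bit picks the DCT. A hedged fragment, assuming dct_sel_interleave_addr() extracts that field:

    u8 intlv_addr = dct_sel_interleave_addr(pvt);

    /* Simplest interleave mode: address bit 6 selects the channel,
     * 0 = DCT0, 1 = DCT1 */
    if (!intlv_addr)
            return (sys_addr >> 6) & 1;
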
2395 /* Convert the sys_addr to the normalized DCT address */
2410 * DRAM address space on this DCT is hoisted above 4Gb &&
2432 * remove dram base to normalize to DCT address
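
"Normalizing" here means subtracting whatever offsets the selected DCT's window from the system address: the node's DRAM base, the DCT-select base, or the DHAR hole offset when the range is hoisted above 4GB. A plausible tail for such a helper, assuming chan_off was set to the applicable offset earlier in the function:

    /* Strip the channel offset; the remainder indexes this DCT alone */
    return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
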
2447 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2451 if (online_spare_swap_done(pvt, dct) &&
2452 csrow == online_spare_bad_dramcs(pvt, dct)) {
2454 for_each_chip_select(tmp_cs, dct, pvt) {
2455 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
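
The fragments at source lines 2451-2455 show nearly the whole online-spare fixup; plausibly the loop completes by redirecting the reported bad chip select to the spare rank (flagged by bit 1 in its base register):

    for_each_chip_select(tmp_cs, dct, pvt) {
            if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
                    csrow = tmp_cs;     /* report the spare instead */
                    break;
            }
    }
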
2465 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2472 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2486 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2488 for_each_chip_select(csrow, dct, pvt) {
2489 if (!csrow_enabled(csrow, dct, pvt))
2492 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2507 cs_found = f10_process_possible_spare(pvt, dct, csrow);
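
Between the get_cs_base_and_mask() call and the spare fixup sits the actual match test, elided from the listing. One plausible reading, under the convention that DCSM mask bits flag "don't care" address bits: a chip select matches when the input address and the base agree on every bit the mask does not exclude.

    if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
            cs_found = f10_process_possible_spare(pvt, dct, csrow);
            break;
    }
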
2621 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2667 /* Verify sys_addr is within DCT Range. */
2676 /* Verify number of dct's that participate in channel interleaving. */
2694 /* Get normalized DCT addr */
2733 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2738 * there is support for 4 DCT's, but only 2 are currently functional.
2782 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
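
Read together, source lines 2667-2782 outline the whole translation pipeline. A summary stitched from the fragments above; of the three calls, only f1x_lookup_addr_in_dct() appears in this listing, so take the other two names (which follow the driver's f1x_ pattern) and the local variables as illustrative:

    /* sys_addr -> (node, DCT, csrow), in four steps:
     *   1. match sys_addr against a node's DRAM base/limit range
     *   2. pick the DCT per the interleave rules
     *   3. strip base/hole offsets to get the normalized DCT address
     *   4. scan that DCT's chip selects for a base/mask match
     */
    channel   = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
    chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, high_range,
                                      dct_sel_base);
    cs_found  = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
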
3116 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
3128 /* Reserve the DCT Device */
3291 static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3293 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3299 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3303 csrow_nr, dct, cs_mode);
3309 static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3314 cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
3316 nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3320 csrow_nr_orig, dct, cs_mode);
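
All of these *_get_csrow_nr_pages() variants (the GPU one at source line 3812 included) share one conversion step: dbam_to_cs()/addr_mask_to_cs_size() report a size in MB, which then becomes a page count. Plausibly the line following each call is:

    /* MB -> pages: with PAGE_SHIFT == 12 this is a left shift by 8 */
    nr_pages <<= 20 - PAGE_SHIFT;
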
3411 /* K8 has only one DCT */
3437 /* get all cores on this DCT */
3812 static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3817 nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3820 edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
4214 int cs = 0, dct = 0;
4216 for (dct = 0; dct < pvt->max_mcs; dct++) {
4217 for_each_chip_select(cs, dct, pvt)
4218 cs_enabled |= csrow_enabled(cs, dct, pvt);
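
This closing loop reads like a probe-time occupancy check: OR together the enabled bits of every chip select on every DCT. A sketch of the enclosing helper under an illustrative name (the listing does not show its signature):

    static bool instance_has_memory(struct amd64_pvt *pvt)
    {
            bool cs_enabled = false;
            int cs = 0, dct = 0;

            for (dct = 0; dct < pvt->max_mcs; dct++) {
                    for_each_chip_select(cs, dct, pvt)
                            cs_enabled |= csrow_enabled(cs, dct, pvt);
            }

            /* true iff at least one chip select is populated */
            return cs_enabled;
    }
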