Lines Matching full:dn

285 	/* DN/HN-F/CXHA */
369 static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn) in arm_cmn_nid() argument
373 nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1); in arm_cmn_nid()
374 nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1); in arm_cmn_nid()
379 const struct arm_cmn_node *dn) in arm_cmn_node_to_xp() argument
381 int id = dn->id >> (dn->portid_bits + dn->deviceid_bits); in arm_cmn_node_to_xp()
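The lines above appear to come from the Linux kernel's Arm CMN PMU driver (drivers/perf/arm-cmn.c): arm_cmn_nid() (lines 369-374) unpacks a node's packed ID into device and port fields, and arm_cmn_node_to_xp() (lines 379-381) keeps only the crosspoint (XP) part. A minimal standalone sketch of that decoding, with illustrative field widths (the driver reads the real deviceid/portid widths from each XP):

#include <stdio.h>

struct nodeid { unsigned int dev, port, xp; };

static struct nodeid decode_nodeid(unsigned int id,
				   unsigned int deviceid_bits,
				   unsigned int portid_bits)
{
	struct nodeid nid;

	nid.dev  = id & ((1U << deviceid_bits) - 1);		/* low bits: device on the port */
	nid.port = (id >> deviceid_bits) & ((1U << portid_bits) - 1);
	nid.xp   = id >> (portid_bits + deviceid_bits);		/* remaining bits: XP coordinate */
	return nid;
}

int main(void)
{
	/* Hypothetical node ID 0x2d with 2 device-ID bits and 1 port-ID bit */
	struct nodeid nid = decode_nodeid(0x2d, 2, 1);

	printf("xp=%u port=%u dev=%u\n", nid.xp, nid.port, nid.dev);
	return 0;
}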
391 struct arm_cmn_node *dn; in arm_cmn_node() local
393 for (dn = cmn->dns; dn->type; dn++) in arm_cmn_node()
394 if (dn->type == type) in arm_cmn_node()
395 return dn; in arm_cmn_node()
475 struct arm_cmn_node *dn; in arm_cmn_show_logid() local
478 for (dn = cmn->dns; dn->type; dn++) { in arm_cmn_show_logid()
479 int pad = dn->logid < 10; in arm_cmn_show_logid()
481 if (dn->type == CMN_TYPE_XP) in arm_cmn_show_logid()
484 if (dn->type < CMN_TYPE_HNI) in arm_cmn_show_logid()
487 if (dn->id != id) in arm_cmn_show_logid()
490 seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid); in arm_cmn_show_logid()
570 struct arm_cmn_node *dn; member
579 #define for_each_hw_dn(hw, dn, i) \ argument
580 for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
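for_each_hw_dn() (lines 579-580 above) is just a counted pointer walk over the contiguous run of nodes an event touches, starting at hw->dn. A simplified, self-contained sketch of the same pattern, using hypothetical struct members:

#include <stdio.h>

struct node { int id; };

struct hw_event {
	struct node *dn;	/* first node this event touches */
	unsigned int num_dns;	/* how many consecutive nodes it spans */
};

#define for_each_hw_dn(hw, dn, i) \
	for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)

int main(void)
{
	struct node nodes[] = { { 0x08 }, { 0x0c }, { 0x48 } };
	struct hw_event ev = { .dn = nodes, .num_dns = 3 };
	struct hw_event *hw = &ev;
	struct node *dn;
	unsigned int i;

	for_each_hw_dn(hw, dn, i)
		printf("node %u: id 0x%x\n", i, dn->id);
	return 0;
}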
1378 struct arm_cmn_node *dn; in arm_cmn_read_dtm() local
1383 for_each_hw_dn(hw, dn, i) { in arm_cmn_read_dtm()
1384 if (dtm != &cmn->dtms[dn->dtm]) { in arm_cmn_read_dtm()
1385 dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; in arm_cmn_read_dtm()
1452 static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn, in arm_cmn_set_event_sel_hi() argument
1460 if (!dn->occupid[fsel].count) { in arm_cmn_set_event_sel_hi()
1461 dn->occupid[fsel].val = occupid; in arm_cmn_set_event_sel_hi()
1463 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) | in arm_cmn_set_event_sel_hi()
1465 dn->occupid[SEL_SN_HOME_SEL].val) | in arm_cmn_set_event_sel_hi()
1467 dn->occupid[SEL_HBT_LBT_SEL].val) | in arm_cmn_set_event_sel_hi()
1469 dn->occupid[SEL_CLASS_OCCUP_ID].val) | in arm_cmn_set_event_sel_hi()
1471 dn->occupid[SEL_OCCUP1ID].val); in arm_cmn_set_event_sel_hi()
1472 writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4); in arm_cmn_set_event_sel_hi()
1473 } else if (dn->occupid[fsel].val != occupid) { in arm_cmn_set_event_sel_hi()
1476 dn->occupid[fsel].count++; in arm_cmn_set_event_sel_hi()
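arm_cmn_set_event_sel_hi() (lines 1452-1476 above) refcounts each per-node filter-select field: the first active event programs the value, later events may share it only if they request the same value, and a conflicting request is rejected. A hedged sketch of that sharing rule with stand-in names; the register write is omitted, and the count decrement mirrors the one visible in arm_cmn_event_clear() (line 1757):

#include <errno.h>
#include <stdio.h>

struct filter_slot {
	unsigned char val;	/* value currently programmed */
	unsigned int count;	/* active events relying on it */
};

static int claim_filter(struct filter_slot *slot, unsigned char val)
{
	if (!slot->count)
		slot->val = val;	/* first user programs the value */
	else if (slot->val != val)
		return -EBUSY;		/* conflicting value: reject the event */
	slot->count++;			/* share the existing setting */
	return 0;
}

static void release_filter(struct filter_slot *slot)
{
	slot->count--;			/* undo one claim when an event goes away */
}

int main(void)
{
	struct filter_slot occup1 = { 0, 0 };

	printf("%d\n", claim_filter(&occup1, 3));	/* 0: programmed */
	printf("%d\n", claim_filter(&occup1, 3));	/* 0: shared */
	printf("%d\n", claim_filter(&occup1, 5));	/* -EBUSY: conflicting filter */
	release_filter(&occup1);
	return 0;
}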
1480 static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx, in arm_cmn_set_event_sel_lo() argument
1484 dn->event_w[dtm_idx] = eventid; in arm_cmn_set_event_sel_lo()
1485 writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL); in arm_cmn_set_event_sel_lo()
1487 dn->event[dtm_idx] = eventid; in arm_cmn_set_event_sel_lo()
1488 writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); in arm_cmn_set_event_sel_lo()
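arm_cmn_set_event_sel_lo() (lines 1480-1488 above) keeps the per-counter event IDs in an array that aliases the register image, so updating one slot and writing back the whole word reprograms the node's event selector; the wide variant simply uses 16-bit slots and a 64-bit register. A little-endian sketch of that union trick with a simplified layout (the driver's actual union also carries the wide view and little-endian typing):

#include <stdint.h>
#include <stdio.h>

union event_sel {
	uint8_t  event[4];	/* one event ID per local counter slot */
	uint32_t sel;		/* the 32-bit value written to the register */
};

int main(void)
{
	union event_sel es = { .sel = 0 };

	es.event[0] = 0x01;	/* counter 0 counts event 0x01 */
	es.event[2] = 0x2a;	/* counter 2 counts event 0x2a */

	/* In the driver this image would go to a writel_relaxed() of EVENT_SEL */
	printf("register image: 0x%08x\n", (unsigned int)es.sel);
	return 0;
}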
1496 struct arm_cmn_node *dn; in arm_cmn_event_start() local
1509 for_each_hw_dn(hw, dn, i) { in arm_cmn_event_start()
1510 void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); in arm_cmn_event_start()
1515 } else for_each_hw_dn(hw, dn, i) { in arm_cmn_event_start()
1518 arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event), in arm_cmn_event_start()
1527 struct arm_cmn_node *dn; in arm_cmn_event_stop() local
1537 for_each_hw_dn(hw, dn, i) { in arm_cmn_event_stop()
1538 void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); in arm_cmn_event_stop()
1543 } else for_each_hw_dn(hw, dn, i) { in arm_cmn_event_stop()
1546 arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel); in arm_cmn_event_stop()
1564 struct arm_cmn_node *dn; in arm_cmn_val_add_event() local
1579 for_each_hw_dn(hw, dn, i) { in arm_cmn_val_add_event()
1580 int wp_idx, dtm = dn->dtm, sel = hw->filter_sel; in arm_cmn_val_add_event()
1598 struct arm_cmn_node *dn; in arm_cmn_validate_group() local
1627 for_each_hw_dn(hw, dn, i) { in arm_cmn_validate_group()
1628 int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel; in arm_cmn_validate_group()
1675 struct arm_cmn_node *dn; in arm_cmn_event_init() local
1715 hw->dn = arm_cmn_node(cmn, type); in arm_cmn_event_init()
1716 if (!hw->dn) in arm_cmn_event_init()
1720 for (dn = hw->dn; dn->type == type; dn++) { in arm_cmn_event_init()
1721 if (bynodeid && dn->id != nodeid) { in arm_cmn_event_init()
1722 hw->dn++; in arm_cmn_event_init()
1726 if (dn->dtc < 0) in arm_cmn_event_init()
1729 hw->dtc_idx[dn->dtc] = 0; in arm_cmn_event_init()
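In arm_cmn_event_init() (lines 1715-1729 above), nodes of one type sit contiguously in cmn->dns, so a by-node event skips leading non-matching entries by advancing the hw->dn base pointer and counts the rest into hw->num_dns. A simplified sketch of that filtering with hypothetical types and IDs (the DTC bookkeeping is omitted):

#include <stdio.h>

struct node { int type, id; };

struct hw_event {
	struct node *dn;
	unsigned int num_dns;
};

int main(void)
{
	/* Hypothetical run of same-type nodes terminated by a type-0 sentinel */
	struct node dns[] = {
		{ 5, 0x08 }, { 5, 0x0c }, { 5, 0x48 }, { 0, 0 },
	};
	struct hw_event hw = { .dn = dns, .num_dns = 0 };
	int bynodeid = 1, nodeid = 0x48;
	struct node *dn;

	for (dn = hw.dn; dn->type == 5; dn++) {
		if (bynodeid && dn->id != nodeid) {
			hw.dn++;	/* skip a leading non-matching node */
			continue;
		}
		hw.num_dns++;		/* count this node for the event */
	}
	printf("first node id 0x%x, num_dns %u\n", hw.dn->id, hw.num_dns);
	return 0;
}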
1750 struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; in arm_cmn_event_clear()
1757 hw->dn[i].occupid[hw->filter_sel].count--; in arm_cmn_event_clear()
1772 struct arm_cmn_node *dn; in arm_cmn_event_add() local
1803 for_each_hw_dn(hw, dn, i) { in arm_cmn_event_add()
1804 struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; in arm_cmn_event_add()
1831 struct arm_cmn_nodeid nid = arm_cmn_nid(dn); in arm_cmn_event_add()
1839 if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event))) in arm_cmn_event_add()
2025 static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx) in arm_cmn_init_dtc() argument
2029 dtc->base = dn->pmu_base - CMN_PMU_OFFSET; in arm_cmn_init_dtc()
2055 struct arm_cmn_node *dn, *xp; in arm_cmn_init_dtcs() local
2068 dn = arm_cmn_node(cmn, CMN_TYPE_DTC); in arm_cmn_init_dtcs()
2070 arm_cmn_node_to_xp(cmn, dn + i)->dtc = i; in arm_cmn_init_dtcs()
2073 for (dn = cmn->dns; dn->type; dn++) { in arm_cmn_init_dtcs()
2074 if (dn->type == CMN_TYPE_XP) in arm_cmn_init_dtcs()
2077 xp = arm_cmn_node_to_xp(cmn, dn); in arm_cmn_init_dtcs()
2078 dn->dtc = xp->dtc; in arm_cmn_init_dtcs()
2079 dn->dtm = xp->dtm; in arm_cmn_init_dtcs()
2081 dn->dtm += arm_cmn_nid(dn).port / 2; in arm_cmn_init_dtcs()
2083 if (dn->type == CMN_TYPE_DTC) { in arm_cmn_init_dtcs()
2084 int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); in arm_cmn_init_dtcs()
2091 if (dn->type == CMN_TYPE_RND) in arm_cmn_init_dtcs()
2092 dn->type = CMN_TYPE_RNI; in arm_cmn_init_dtcs()
2095 if (dn->type == CMN_TYPE_CCLA_RNI) in arm_cmn_init_dtcs()
2096 dn->type = CMN_TYPE_CCLA; in arm_cmn_init_dtcs()
2152 struct arm_cmn_node cfg, *dn; in arm_cmn_discover() local
2211 dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps, in arm_cmn_discover()
2212 sizeof(*dn), GFP_KERNEL); in arm_cmn_discover()
2213 if (!dn) in arm_cmn_discover()
2225 cmn->dns = dn; in arm_cmn_discover()
2229 struct arm_cmn_node *xp = dn++; in arm_cmn_discover()
2307 arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); in arm_cmn_discover()
2308 dn->portid_bits = xp->portid_bits; in arm_cmn_discover()
2309 dn->deviceid_bits = xp->deviceid_bits; in arm_cmn_discover()
2311 switch (dn->type) { in arm_cmn_discover()
2314 dn++; in arm_cmn_discover()
2329 dn++; in arm_cmn_discover()
2332 dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL; in arm_cmn_discover()
2333 dn++; in arm_cmn_discover()
2351 dn[1] = dn[0]; in arm_cmn_discover()
2352 dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL; in arm_cmn_discover()
2353 dn[1].type = arm_cmn_subtype(dn->type); in arm_cmn_discover()
2354 dn += 2; in arm_cmn_discover()
2358 dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type); in arm_cmn_discover()
2365 cmn->num_dns = dn - cmn->dns; in arm_cmn_discover()
2368 sz = (void *)(dn + 1) - (void *)cmn->dns; in arm_cmn_discover()
2369 dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); in arm_cmn_discover()
2370 if (dn) in arm_cmn_discover()
2371 cmn->dns = dn; in arm_cmn_discover()
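arm_cmn_discover() (from line 2152 above) over-allocates the node array (num_dns * 2 - num_xps entries, lines 2211-2212) so that hybrid devices such as CCLA+RNI can be split into two logical nodes (lines 2351-2354), then trims the array with devm_krealloc() once the real count is known. A sketch of the split with stand-in types and offsets; the real driver keeps the hybrid type on the first entry and derives the companion's type via arm_cmn_subtype():

#include <stdio.h>

enum node_type { TYPE_INVALID, TYPE_RNI, TYPE_CCLA, TYPE_CCLA_RNI };
#define CCLA_PMU_EVENT_SEL_OFF 0x8UL	/* stand-in for CMN_CCLA_PMU_EVENT_SEL */

struct node {
	enum node_type type;
	unsigned long pmu_base;
};

static enum node_type subtype(enum node_type type)
{
	return type == TYPE_CCLA_RNI ? TYPE_RNI : TYPE_INVALID;
}

int main(void)
{
	/* One discovered hybrid node, with room reserved for its twin */
	struct node dn[2] = { { TYPE_CCLA_RNI, 0x1000 } };

	dn[1] = dn[0];				  /* duplicate the discovered entry */
	dn[0].pmu_base += CCLA_PMU_EVENT_SEL_OFF; /* first half uses the CCLA PMU page */
	dn[1].type = subtype(dn[0].type);	  /* second half becomes a plain RNI */

	printf("dn[0]: type %d, pmu_base 0x%lx\n", dn[0].type, dn[0].pmu_base);
	printf("dn[1]: type %d, pmu_base 0x%lx\n", dn[1].type, dn[1].pmu_base);
	return 0;
}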