Lines matching refs:u32 (each entry: source line number, matched code, enclosing function)

245 u32 name, value; in dcb_tx_queue_prio_enable()
678 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
807 u32 param, val = 0; in adap_config_hpfilter()
1250 u32 v, new_idx; in cxgb4_set_rspq_intr_params()
2141 u32 v1, v2, lp_count, hp_count; in cxgb4_dbfifo_count()
2213 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
2242 u32 val; in cxgb4_sync_txq_pidx()
2262 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) in cxgb4_read_tpte()
2264 u32 edc0_size, edc1_size, mc0_size, mc1_size, size; in cxgb4_read_tpte()
2265 u32 edc0_end, edc1_end, mc0_end, mc1_end; in cxgb4_read_tpte()
2266 u32 offset, memtype, memaddr; in cxgb4_read_tpte()
2268 u32 hma_size = 0; in cxgb4_read_tpte()
2340 u32 hi, lo; in cxgb4_read_sge_timestamp()
2404 u32 v1, v2, lp_count, hp_count; in drain_db_fifo()
2530 u32 val; in sync_txq_pidx()
2589 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2693 (const u32 *)ifa, 1); in cxgb4_inet6addr_handler()
2697 (const u32 *)ifa, 1); in cxgb4_inet6addr_handler()
2713 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1); in cxgb4_inet6addr_handler()
2716 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1); in cxgb4_inet6addr_handler()
3293 u32 fw_pfvf, fw_class; in cxgb4_mgmt_set_vf_rate()
3417 u32 param, val; in cxgb4_mgmt_set_vf_link_state()
3491 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) in cxgb_set_tx_maxrate()
3498 u32 req_rate; in cxgb_set_tx_maxrate()
3952 u32 nic_win_base = t4_get_util_window(adap); in setup_memwin()
3960 u32 start; in setup_memwin_rdma()
4033 u32 param, hma_size; in adap_config_hma()
4036 u32 page_order; in adap_config_hma()
4180 u32 v; in adap_init1()
4456 u32 finiver, finicsum, cfcsum, param, val; in adap_init0_config()
4512 u32 params[7], val[7]; in adap_init0_config()
4764 u32 params[7], val[7]; in adap_init0()
4766 u32 v, port_vec; in adap_init0()
5224 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5655 u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; in cfg_queues()
5656 u32 ncpus = num_online_cpus(); in cfg_queues()
5657 u32 niqflint, neq, num_ulds; in cfg_queues()
5659 u32 i, n10g = 0, qidx = 0; in cfg_queues()
5660 u32 q10g = 0, q1g; in cfg_queues()
5697 avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); in cfg_queues()
5753 i = min_t(u32, MAX_OFLD_QSETS, ncpus); in cfg_queues()
5844 static int alloc_msix_info(struct adapter *adap, u32 num_vec) in alloc_msix_info()
5906 u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0; in enable_msix()
5907 u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0; in enable_msix()
5909 u32 i, want, need, num_vec; in enable_msix()
6215 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); in t4_get_chip_type()
6251 u32 pcie_fw; in cxgb4_iov_configure()
6301 u32 devcap2; in cxgb4_iov_configure()
6439 u32 tcp_sn) in cxgb4_ktls_dev_add()
6610 u32 whoami; in init_one()
6948 u32 v; in init_one()
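
Most of these matches follow one pattern: a 32-bit hardware register or firmware parameter is read into a fixed-width u32 local (for example REV_G(t4_read_reg(adap, PL_REV_A)) in t4_get_chip_type() above) and then decoded field by field. Below is a minimal standalone sketch of that shift-and-mask decoding, not taken from the driver: the REV_S/REV_M values and the userspace main() are illustrative assumptions, and the _S/_M/_G macro names merely mimic the register-field naming convention visible in the listing.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;            /* userspace stand-in for the kernel's u32 */

/* Illustrative field definitions (assumed values, not the real PL_REV layout) */
#define REV_S    0               /* field shift */
#define REV_M    0xfU            /* field mask  */
#define REV_G(x) (((x) >> REV_S) & REV_M)

int main(void)
{
	u32 pl_rev = 0x00000015;     /* pretend 32-bit register readback */

	/* Extract the low 4-bit field, as a _G() accessor would */
	printf("chip rev field = %u\n", (unsigned int)REV_G(pl_rev));
	return 0;
}

Using a fixed-width u32 for such locals keeps the register width explicit regardless of the host's int size, which is why the type recurs throughout the functions listed above.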