Lines Matching refs:mvdev
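
Note: most hits below reach mvdev either as a function argument or through ndev->mvdev, because struct mlx5_vdpa_dev is embedded in the net device and recovered with to_mlx5_vdpa_ndev(). A minimal sketch of that relationship, assuming the usual container_of() embedding (field list abridged, not quoted verbatim):

	struct mlx5_vdpa_net {
		struct mlx5_vdpa_dev mvdev;	/* embedded generic vDPA device state; sketch, fields abridged */
		/* ... net-specific resources: res, rqt/tir, steering, event_cbs, ... */
	};

	static inline struct mlx5_vdpa_net *to_mlx5_vdpa_ndev(struct mlx5_vdpa_dev *mvdev)
	{
		/* map the embedded mvdev back to its enclosing net device */
		return container_of(mvdev, struct mlx5_vdpa_net, mvdev);
	}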

128 static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)  in is_index_valid()  argument
130 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { in is_index_valid()
131 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in is_index_valid()
137 return idx <= mvdev->max_idx; in is_index_valid()
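
The is_index_valid() fragments at 128-137 form a single predicate over the negotiated features; a plausible reconstruction (a sketch, assuming the elided branches return the fixed two-data-VQ and control-VQ limits used elsewhere in this listing):

	static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
	{
		/* reconstruction sketch, not quoted verbatim from the file */
		if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
			if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
				return idx < 2;	/* rx + tx only */
			else
				return idx < 3;	/* rx + tx + control VQ */
		}

		return idx <= mvdev->max_idx;
	}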
142 static int setup_driver(struct mlx5_vdpa_dev *mvdev);
150 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
156 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
160 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_is_little_endian() argument
163 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
166 static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val) in mlx5vdpa16_to_cpu() argument
168 return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val); in mlx5vdpa16_to_cpu()
171 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) in cpu_to_mlx5vdpa16() argument
173 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val); in cpu_to_mlx5vdpa16()
176 static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev) in ctrl_vq_idx() argument
178 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
181 return mvdev->max_vqs; in ctrl_vq_idx()
184 static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_ctrl_vq_idx() argument
186 return idx == ctrl_vq_idx(mvdev); in is_ctrl_vq_idx()
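
ctrl_vq_idx() and is_ctrl_vq_idx() at 176-186 pin down where the control VQ lives; a sketch of the pair, assuming the non-MQ case places it at index 2 right after the two data virtqueues:

	static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
	{
		/* reconstruction sketch: without MQ the control VQ follows the two data VQs */
		if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
			return 2;

		return mvdev->max_vqs;
	}

	static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
	{
		return idx == ctrl_vq_idx(mvdev);
	}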
189 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) in print_status() argument
192 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n", in print_status()
198 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get"); in print_status()
200 mlx5_vdpa_info(mvdev, "driver resets the device\n"); in print_status()
212 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set) in print_features() argument
215 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n", in print_features()
221 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads"); in print_features()
223 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n"); in print_features()
263 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis() local
270 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
272 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err); in create_tis()
279 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
292 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
293 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
309 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
310 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
315 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
361 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
375 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
377 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
389 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
391 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
396 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
402 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
415 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
431 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
441 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
451 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
465 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
466 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
467 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
469 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
512 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
538 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
539 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
571 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
587 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
608 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
615 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
619 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
623 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
630 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
645 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
695 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
722 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
730 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
732 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
768 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
805 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
845 static bool counters_supported(const struct mlx5_vdpa_dev *mvdev) in counters_supported() argument
847 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) & in counters_supported()
851 static bool msix_mode_supported(struct mlx5_vdpa_dev *mvdev) in msix_mode_supported() argument
853 return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) & in msix_mode_supported()
855 pci_msix_can_alloc_dyn(mvdev->mdev->pdev); in msix_mode_supported()
879 mlx_features = get_features(ndev->mvdev.actual_features); in create_virtqueue()
884 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
910 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
914 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey); in create_virtqueue()
921 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
922 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
925 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
950 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
953 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
954 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
987 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
999 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1016 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1034 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1078 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1139 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1140 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1195 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1201 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1216 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1223 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1225 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1239 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1244 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1246 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1247 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1272 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1339 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1373 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1376 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1387 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1422 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1432 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1457 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1467 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1477 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1500 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1517 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1529 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1549 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1553 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1564 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1575 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1576 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1605 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1764 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1766 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1772 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1796 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mac() argument
1798 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac()
1799 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
1805 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
1822 mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n", in handle_ctrl_mac()
1829 mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n", in handle_ctrl_mac()
1846 mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n"); in handle_ctrl_mac()
1850 mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n"); in handle_ctrl_mac()
1858 mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n", in handle_ctrl_mac()
1863 mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n", in handle_ctrl_mac()
1870 mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n"); in handle_ctrl_mac()
1885 static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps) in change_num_qps() argument
1887 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps()
1923 static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mq() argument
1925 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq()
1927 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
1943 if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) in handle_ctrl_mq()
1950 newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); in handle_ctrl_mq()
1960 if (!change_num_qps(mvdev, newqps)) in handle_ctrl_mq()
1971 static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_vlan() argument
1973 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan()
1975 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan()
1980 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
1989 id = mlx5vdpa16_to_cpu(mvdev, vlan); in handle_ctrl_vlan()
2000 id = mlx5vdpa16_to_cpu(mvdev, vlan); in handle_ctrl_vlan()
2016 struct mlx5_vdpa_dev *mvdev; in mlx5_cvq_kick_handler() local
2023 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
2024 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
2025 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
2029 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_cvq_kick_handler()
2032 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2051 status = handle_ctrl_mac(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
2054 status = handle_ctrl_mq(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
2057 status = handle_ctrl_vlan(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
2075 queue_work(mvdev->wq, &wqent->work); in mlx5_cvq_kick_handler()
2085 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_kick_vq() local
2086 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq()
2089 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_kick_vq()
2092 if (unlikely(is_ctrl_vq_idx(mvdev, idx))) { in mlx5_vdpa_kick_vq()
2093 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
2096 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2104 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2110 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_address() local
2111 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address()
2114 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_address()
2117 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_address()
2118 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2119 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
2120 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2133 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_num() local
2134 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num()
2137 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_num()
2140 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_num()
2141 struct mlx5_control_vq *cvq = &mvdev->cvq; in mlx5_vdpa_set_vq_num()
2153 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_cb() local
2154 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb()
2157 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_set_vq_cb()
2158 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
2171 static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready) in set_cvq_ready() argument
2173 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
2184 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_ready() local
2185 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready()
2189 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
2192 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_ready()
2195 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_ready()
2196 set_cvq_ready(mvdev, ready); in mlx5_vdpa_set_vq_ready()
2206 mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err); in mlx5_vdpa_set_vq_ready()
2217 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_ready() local
2218 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready()
2220 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
2223 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
2224 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2232 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_state() local
2233 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state()
2236 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_state()
2239 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_state()
2240 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2246 mlx5_vdpa_warn(mvdev, "can't modify available index\n"); in mlx5_vdpa_set_vq_state()
2257 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_state() local
2258 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state()
2263 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_state()
2266 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_get_vq_state()
2267 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
2287 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); in mlx5_vdpa_get_vq_state()
2301 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_group() local
2303 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_group()
2358 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_device_features() local
2359 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features()
2361 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2362 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2365 static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) in verify_driver_features() argument
2387 static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev) in setup_virtqueues() argument
2389 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues()
2393 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2413 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2422 static void update_cvq_info(struct mlx5_vdpa_dev *mvdev) in update_cvq_info() argument
2424 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) { in update_cvq_info()
2425 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) { in update_cvq_info()
2427 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2432 mvdev->max_idx = 2; in update_cvq_info()
2436 mvdev->max_idx = 1; in update_cvq_info()
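
The max_idx assignments at 2424-2436 mirror the indexing rules above; a sketch of how update_cvq_info() likely stitches them together, assuming the standard MLX5_FEATURE() checks seen elsewhere in this listing:

	static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
	{
		/* reconstruction sketch of the branches listed above */
		if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
			if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
				/* MQ negotiated: control VQ index follows the last data VQ */
				mvdev->max_idx = mvdev->max_vqs;
			} else {
				/* control VQ only: data VQs at 0 and 1, control VQ at 2 */
				mvdev->max_idx = 2;
			}
		} else {
			/* two data virtqueues only, rx and tx */
			mvdev->max_idx = 1;
		}
	}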
2459 static bool get_link_state(struct mlx5_vdpa_dev *mvdev) in get_link_state() argument
2461 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == in get_link_state()
2471 struct mlx5_vdpa_dev *mvdev; in update_carrier() local
2475 mvdev = wqent->mvdev; in update_carrier()
2476 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
2477 if (get_link_state(mvdev)) in update_carrier()
2478 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2480 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2496 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2498 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2527 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2531 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2542 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2543 if (ndev->mvdev.wq) in unregister_link_notifier()
2544 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2549 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_driver_features() local
2550 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features()
2553 print_features(mvdev, features, true); in mlx5_vdpa_set_driver_features()
2555 err = verify_driver_features(mvdev, features); in mlx5_vdpa_set_driver_features()
2559 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2560 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2561 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2575 update_cvq_info(mvdev); in mlx5_vdpa_set_driver_features()
2581 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_config_cb() local
2582 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb()
2605 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_status() local
2606 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status()
2608 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2609 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2640 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2651 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2663 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2680 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, in mlx5_vdpa_change_map() argument
2683 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map()
2692 mlx5_vdpa_destroy_mr_asid(mvdev, asid); in mlx5_vdpa_change_map()
2693 err = mlx5_vdpa_create_mr(mvdev, iotlb, asid); in mlx5_vdpa_change_map()
2697 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) in mlx5_vdpa_change_map()
2701 err = setup_driver(mvdev); in mlx5_vdpa_change_map()
2708 mlx5_vdpa_destroy_mr_asid(mvdev, asid); in mlx5_vdpa_change_map()
2714 static int setup_driver(struct mlx5_vdpa_dev *mvdev) in setup_driver() argument
2716 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver()
2722 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n"); in setup_driver()
2732 err = setup_virtqueues(mvdev); in setup_driver()
2734 mlx5_vdpa_warn(mvdev, "setup_virtqueues\n"); in setup_driver()
2740 mlx5_vdpa_warn(mvdev, "create_rqt\n"); in setup_driver()
2746 mlx5_vdpa_warn(mvdev, "create_tir\n"); in setup_driver()
2752 mlx5_vdpa_warn(mvdev, "setup_steering\n"); in setup_driver()
2792 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2795 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2798 static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev) in setup_cvq_vring() argument
2800 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring()
2803 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { in setup_cvq_vring()
2806 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
2820 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_status() local
2821 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status()
2824 print_status(mvdev, status, true); in mlx5_vdpa_set_status()
2828 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2830 err = setup_cvq_vring(mvdev); in mlx5_vdpa_set_status()
2832 mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n"); in mlx5_vdpa_set_status()
2836 err = setup_driver(mvdev); in mlx5_vdpa_set_status()
2838 mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); in mlx5_vdpa_set_status()
2842 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); in mlx5_vdpa_set_status()
2847 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
2854 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
2855 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
2860 static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev) in init_group_to_asid_map() argument
2866 mvdev->group2asid[i] = 0; in init_group_to_asid_map()
2871 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_reset() local
2872 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset()
2874 print_status(mvdev, 0, true); in mlx5_vdpa_reset()
2875 mlx5_vdpa_info(mvdev, "performing device reset\n"); in mlx5_vdpa_reset()
2881 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_reset()
2882 ndev->mvdev.status = 0; in mlx5_vdpa_reset()
2883 ndev->mvdev.suspended = false; in mlx5_vdpa_reset()
2885 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_reset()
2886 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_reset()
2887 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_reset()
2888 ndev->mvdev.actual_features = 0; in mlx5_vdpa_reset()
2889 init_group_to_asid_map(mvdev); in mlx5_vdpa_reset()
2890 ++mvdev->generation; in mlx5_vdpa_reset()
2892 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_reset()
2893 if (mlx5_vdpa_create_mr(mvdev, NULL, 0)) in mlx5_vdpa_reset()
2894 mlx5_vdpa_warn(mvdev, "create MR failed\n"); in mlx5_vdpa_reset()
2909 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_config() local
2910 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config()
2924 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_generation() local
2926 return mvdev->generation; in mlx5_vdpa_get_generation()
2929 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, in set_map_data() argument
2935 err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid); in set_map_data()
2937 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); in set_map_data()
2942 err = mlx5_vdpa_change_map(mvdev, iotlb, asid); in set_map_data()
2950 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_map() local
2951 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map()
2955 err = set_map_data(mvdev, iotlb, asid); in mlx5_vdpa_set_map()
2962 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_dma_dev() local
2964 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_dma_dev()
2967 return mvdev->vdev.dma_dev; in mlx5_get_vq_dma_dev()
2975 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
2984 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
2991 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_free() local
2995 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
2998 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_free()
3000 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
3003 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3011 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_notification() local
3016 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_notification()
3023 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
3026 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
3027 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3035 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_irq() local
3036 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_irq()
3039 if (!is_index_valid(mvdev, idx)) in mlx5_get_vq_irq()
3042 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_irq()
3054 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_driver_features() local
3056 return mvdev->actual_features; in mlx5_vdpa_get_driver_features()
3068 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3078 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3081 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3095 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vendor_vq_stats() local
3096 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats()
3104 if (!is_index_valid(mvdev, idx)) { in mlx5_vdpa_get_vendor_vq_stats()
3110 if (idx == ctrl_vq_idx(mvdev)) { in mlx5_vdpa_get_vendor_vq_stats()
3111 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
3146 static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_cvq_suspend() argument
3150 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_vdpa_cvq_suspend()
3153 cvq = &mvdev->cvq; in mlx5_vdpa_cvq_suspend()
3159 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_suspend() local
3160 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend()
3164 mlx5_vdpa_info(mvdev, "suspending device\n"); in mlx5_vdpa_suspend()
3172 mlx5_vdpa_cvq_suspend(mvdev); in mlx5_vdpa_suspend()
3173 mvdev->suspended = true; in mlx5_vdpa_suspend()
3181 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_set_group_asid() local
3186 mvdev->group2asid[group] = asid; in mlx5_set_group_asid()
3244 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
3248 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
3261 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
3273 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
3282 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
3290 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
3331 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3334 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3337 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3342 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3345 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3346 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3360 struct mlx5_vdpa_dev *mvdev; in mlx5_vdpa_dev_add() local
3415 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_dev_add()
3420 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3421 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3422 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
3447 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3451 if (get_link_state(mvdev)) in mlx5_vdpa_dev_add()
3452 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3454 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3482 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3489 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3491 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3492 mvdev->vdev.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
3493 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3497 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
3498 err = mlx5_vdpa_create_mr(mvdev, NULL, 0); in mlx5_vdpa_dev_add()
3507 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3509 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); in mlx5_vdpa_dev_add()
3510 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
3515 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
3516 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
3524 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
3528 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_dev_add()
3530 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3535 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
3542 struct mlx5_vdpa_dev *mvdev = to_mvdev(dev); in mlx5_vdpa_dev_del() local
3543 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del()
3548 wq = mvdev->wq; in mlx5_vdpa_dev_del()
3549 mvdev->wq = NULL; in mlx5_vdpa_dev_del()