Lines Matching refs:rnet
921 struct rdma_dev_net *rnet) in add_one_compat_dev() argument
934 if (net_eq(read_pnet(&rnet->net), in add_one_compat_dev()
944 cdev = xa_load(&device->compat_devs, rnet->id); in add_one_compat_dev()
949 ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL); in add_one_compat_dev()
960 rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); in add_one_compat_dev()
973 ret = xa_err(xa_store(&device->compat_devs, rnet->id, in add_one_compat_dev()
988 xa_release(&device->compat_devs, rnet->id); in add_one_compat_dev()
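Lines 944-988 trace the xarray pattern inside add_one_compat_dev() in drivers/infiniband/core/device.c: look up an existing compat device for the namespace, reserve the slot, initialise a new core device bound to that namespace, then either store the pointer or release the reservation. The sketch below reconstructs that flow under stated assumptions: only rnet->net and rnet->id are confirmed rdma_dev_net fields (their types follow from the read_pnet()/write_pnet() and xa_alloc() calls in the listing); the comparison target on line 934, the ib_core_device allocation, and the error unwinding are inferred, not copied from the source. rdma_init_coredev() is the helper quoted at line 960 and is not defined here.

/*
 * Sketch of the flow suggested by the listing above, not the verbatim
 * kernel function. Everything marked "assumed" is a guess.
 */
#include <linux/xarray.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <rdma/ib_verbs.h>

struct rdma_dev_net {
	possible_net_t net;	/* namespace handle (read_pnet/write_pnet above) */
	u32 id;			/* slot index in the rdma_nets xarray */
};

static int add_one_compat_dev(struct ib_device *device,
			      struct rdma_dev_net *rnet)
{
	struct ib_core_device *cdev;
	int ret;

	/* Line 934: skip the namespace the device is natively bound to
	 * (the comparison target is assumed). */
	if (net_eq(read_pnet(&rnet->net),
		   read_pnet(&device->coredev.rdma_net)))
		return 0;

	/* Line 944: a compat device for this namespace may already exist. */
	cdev = xa_load(&device->compat_devs, rnet->id);
	if (cdev)
		return 0;

	/* Line 949: reserve the slot up front so the final store cannot
	 * fail on memory allocation. */
	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
	if (ret)
		return ret;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);	/* assumed allocation */
	if (!cdev) {
		ret = -ENOMEM;
		goto err_release;
	}

	/* Line 960: bind the compat core device to the target namespace. */
	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));

	/* Line 973: publish the pointer into the reserved slot; xa_err()
	 * converts an error-encoded return into a plain errno. */
	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
			      cdev, GFP_KERNEL));
	if (ret)
		goto err_free;
	return 0;

err_free:
	kfree(cdev);	/* the real unwinding is more involved; simplified here */
err_release:
	/* Line 988: drop the reservation on any failure. */
	xa_release(&device->compat_devs, rnet->id);
	return ret;
}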
1019 struct rdma_dev_net *rnet; in add_compat_devs() local
1026 xa_for_each (&rdma_nets, index, rnet) { in add_compat_devs()
1027 ret = add_one_compat_dev(device, rnet); in add_compat_devs()
1058 struct rdma_dev_net *rnet; in add_all_compat_devs() local
1071 xa_for_each (&rdma_nets, net_index, rnet) { in add_all_compat_devs()
1072 ret = add_one_compat_dev(dev, rnet); in add_all_compat_devs()
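add_compat_devs() (lines 1026-1027) walks the global rdma_nets xarray and calls add_one_compat_dev() for each registered namespace. A minimal sketch of that loop, assuming the plain walk shown in the listing (the real function may also take locks the listing does not show, and rdma_nets is assumed to be an allocating xarray defined elsewhere in the file):

/* Sketch only: locking around the walk is assumed. */
static int add_compat_devs(struct ib_device *device)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	/* Lines 1026-1027: one compat device per registered namespace. */
	xa_for_each (&rdma_nets, index, rnet) {
		ret = add_one_compat_dev(device, rnet);
		if (ret)
			break;
	}
	return ret;
}

add_all_compat_devs() (lines 1058-1072) apparently runs the same inner walk once per IB device; the net_index name at line 1071 suggests the walk is nested inside an outer loop over registered devices.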
1086 struct rdma_dev_net *rnet; in rdma_compatdev_set() local
1099 xa_for_each (&rdma_nets, index, rnet) { in rdma_compatdev_set()
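rdma_compatdev_set() (lines 1086-1099) also walks rdma_nets. One plausible reading of a bare xa_for_each() there is an "are any namespaces registered?" probe before changing the compat-device mode; how the function acts on the answer is not visible in the listing. The helper name below is hypothetical and exists only to show the idiom:

/*
 * Sketch of the emptiness-check idiom a walk like line 1099 can implement;
 * the surrounding policy in rdma_compatdev_set() is not shown here.
 */
static bool rdma_nets_have_entries(void)	/* hypothetical helper */
{
	struct rdma_dev_net *rnet;
	unsigned long index;

	xa_for_each (&rdma_nets, index, rnet)
		return true;	/* found at least one registered namespace */
	return false;
}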
1118 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); in rdma_dev_exit_net() local
1127 ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL)); in rdma_dev_exit_net()
1140 remove_one_compat_dev(dev, rnet->id); in rdma_dev_exit_net()
1152 rdma_nl_net_exit(rnet); in rdma_dev_exit_net()
1153 xa_erase(&rdma_nets, rnet->id); in rdma_dev_exit_net()
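rdma_dev_exit_net() (lines 1118-1153) tears the per-namespace state down in a specific order: overwrite the rdma_nets slot with NULL so the id stays reserved but can no longer be looked up (line 1127), strip the compat device with that id from every IB device (line 1140), and only then shut down the per-net netlink state and erase the slot (lines 1152-1153). A sketch of that ordering, with the device iteration, locking, and refcounting assumed:

/*
 * Sketch of the teardown order shown above; the walk over the device
 * table (name assumed) and all locking are assumptions.
 */
static void rdma_dev_exit_net(struct net *net)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
	struct ib_device *dev;
	unsigned long index;

	/* Line 1127: blank the slot first so no new compat device can be
	 * created against this namespace while the id stays reserved. */
	WARN_ON(xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL)));

	/* Line 1140: remove this namespace's compat device from every
	 * IB device. */
	xa_for_each (&devices, index, dev)
		remove_one_compat_dev(dev, rnet->id);

	/* Lines 1152-1153: per-net netlink teardown, then free the id. */
	rdma_nl_net_exit(rnet);
	xa_erase(&rdma_nets, rnet->id);
}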
1158 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); in rdma_dev_init_net() local
1163 write_pnet(&rnet->net, net); in rdma_dev_init_net()
1165 ret = rdma_nl_net_init(rnet); in rdma_dev_init_net()
1173 ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); in rdma_dev_init_net()
1175 rdma_nl_net_exit(rnet); in rdma_dev_init_net()
1185 ret = add_one_compat_dev(dev, rnet); in rdma_dev_init_net()
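rdma_dev_init_net() (lines 1158-1185) is the mirror image: bind the pernet area to its namespace, bring up the per-net netlink state, allocate an id in rdma_nets, then create a compat device for the new namespace on each existing IB device. The sketch below follows the calls quoted in the listing; the device walk and any handling between lines 1165 and 1173 are assumptions.

/*
 * Sketch of the per-namespace setup path; only the calls quoted in the
 * listing are taken as given, the rest is assumed.
 */
static int rdma_dev_init_net(struct net *net)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
	struct ib_device *dev;
	unsigned long index;
	int ret;

	/* Line 1163: remember which namespace this pernet area belongs to. */
	write_pnet(&rnet->net, net);

	/* Line 1165: per-net netlink setup. */
	ret = rdma_nl_net_init(rnet);
	if (ret)
		return ret;

	/* Line 1173: hand out a 32-bit id and publish the entry; xa_alloc()
	 * stores rnet and writes the chosen index into rnet->id. */
	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
	if (ret) {
		/* Line 1175: undo the netlink setup if no id could be allocated. */
		rdma_nl_net_exit(rnet);
		return ret;
	}

	/* Line 1185: give every existing IB device a compat presence in the
	 * new namespace (device walk and error handling assumed). */
	xa_for_each (&devices, index, dev) {
		ret = add_one_compat_dev(dev, rnet);
		if (ret)
			break;
	}
	return ret;
}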