Lines matching refs:id_priv

138 struct rdma_id_private *id_priv; in rdma_iw_cm_id() local
140 id_priv = container_of(id, struct rdma_id_private, id); in rdma_iw_cm_id()
142 return id_priv->cm_id.iw; in rdma_iw_cm_id()
153 struct rdma_id_private *id_priv = in rdma_res_to_id() local
156 return &id_priv->id; in rdma_res_to_id()
355 struct rdma_id_private *id_priv; member
406 static int cma_comp_exch(struct rdma_id_private *id_priv, in cma_comp_exch() argument
419 lockdep_assert_held(&id_priv->handler_mutex); in cma_comp_exch()
421 spin_lock_irqsave(&id_priv->lock, flags); in cma_comp_exch()
422 if ((ret = (id_priv->state == comp))) in cma_comp_exch()
423 id_priv->state = exch; in cma_comp_exch()
424 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_comp_exch()
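
The fragments above (lines 406-424) show nearly all of cma_comp_exch(), the locked compare-and-exchange that drives every CM state transition. A reconstructed sketch follows; the declarations of flags and ret are assumptions, since the listing keeps only lines that reference id_priv.

    static int cma_comp_exch(struct rdma_id_private *id_priv,
                             enum rdma_cm_state comp, enum rdma_cm_state exch)
    {
        unsigned long flags;    /* assumed declaration */
        int ret;                /* assumed declaration */

        /* State may only move while the handler mutex is held. */
        lockdep_assert_held(&id_priv->handler_mutex);

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
            id_priv->state = exch;  /* exchange only if the state matched */
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
    }
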
438 static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) in cma_src_addr() argument
440 return (struct sockaddr *)&id_priv->id.route.addr.src_addr; in cma_src_addr()
443 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) in cma_dst_addr() argument
445 return (struct sockaddr *)&id_priv->id.route.addr.dst_addr; in cma_dst_addr()
471 struct rdma_id_private *id_priv = list_first_entry( in compare_netdev_and_ip() local
473 int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if; in compare_netdev_and_ip()
474 struct sockaddr *sb = cma_dst_addr(id_priv); in compare_netdev_and_ip()
562 static void cma_remove_id_from_tree(struct rdma_id_private *id_priv) in cma_remove_id_from_tree() argument
568 if (list_empty(&id_priv->id_list_entry)) in cma_remove_id_from_tree()
572 id_priv->id.route.addr.dev_addr.bound_dev_if, in cma_remove_id_from_tree()
573 cma_dst_addr(id_priv)); in cma_remove_id_from_tree()
577 list_del_init(&id_priv->id_list_entry); in cma_remove_id_from_tree()
586 static void _cma_attach_to_dev(struct rdma_id_private *id_priv, in _cma_attach_to_dev() argument
590 id_priv->cma_dev = cma_dev; in _cma_attach_to_dev()
591 id_priv->id.device = cma_dev->device; in _cma_attach_to_dev()
592 id_priv->id.route.addr.dev_addr.transport = in _cma_attach_to_dev()
594 list_add_tail(&id_priv->device_item, &cma_dev->id_list); in _cma_attach_to_dev()
596 trace_cm_id_attach(id_priv, cma_dev->device); in _cma_attach_to_dev()
599 static void cma_attach_to_dev(struct rdma_id_private *id_priv, in cma_attach_to_dev() argument
602 _cma_attach_to_dev(id_priv, cma_dev); in cma_attach_to_dev()
603 id_priv->gid_type = in cma_attach_to_dev()
604 cma_dev->default_gid_type[id_priv->id.port_num - in cma_attach_to_dev()
608 static void cma_release_dev(struct rdma_id_private *id_priv) in cma_release_dev() argument
611 list_del_init(&id_priv->device_item); in cma_release_dev()
612 cma_dev_put(id_priv->cma_dev); in cma_release_dev()
613 id_priv->cma_dev = NULL; in cma_release_dev()
614 id_priv->id.device = NULL; in cma_release_dev()
615 if (id_priv->id.route.addr.dev_addr.sgid_attr) { in cma_release_dev()
616 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); in cma_release_dev()
617 id_priv->id.route.addr.dev_addr.sgid_attr = NULL; in cma_release_dev()
622 static inline unsigned short cma_family(struct rdma_id_private *id_priv) in cma_family() argument
624 return id_priv->id.route.addr.src_addr.ss_family; in cma_family()
627 static int cma_set_default_qkey(struct rdma_id_private *id_priv) in cma_set_default_qkey() argument
632 switch (id_priv->id.ps) { in cma_set_default_qkey()
635 id_priv->qkey = RDMA_UDP_QKEY; in cma_set_default_qkey()
638 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); in cma_set_default_qkey()
639 ret = ib_sa_get_mcmember_rec(id_priv->id.device, in cma_set_default_qkey()
640 id_priv->id.port_num, &rec.mgid, in cma_set_default_qkey()
643 id_priv->qkey = be32_to_cpu(rec.qkey); in cma_set_default_qkey()
651 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) in cma_set_qkey() argument
654 (id_priv->qkey && (id_priv->qkey != qkey))) in cma_set_qkey()
657 id_priv->qkey = qkey; in cma_set_qkey()
686 struct rdma_id_private *id_priv) in cma_validate_port() argument
688 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_validate_port()
694 if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) in cma_validate_port()
740 static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, in cma_bind_sgid_attr() argument
743 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); in cma_bind_sgid_attr()
744 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; in cma_bind_sgid_attr()
756 static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) in cma_acquire_dev_by_src_ip() argument
758 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_acquire_dev_by_src_ip()
767 id_priv->id.ps == RDMA_PS_IPOIB) in cma_acquire_dev_by_src_ip()
770 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_acquire_dev_by_src_ip()
783 gid_type, gidp, id_priv); in cma_acquire_dev_by_src_ip()
785 id_priv->id.port_num = port; in cma_acquire_dev_by_src_ip()
786 cma_bind_sgid_attr(id_priv, sgid_attr); in cma_acquire_dev_by_src_ip()
787 cma_attach_to_dev(id_priv, cma_dev); in cma_acquire_dev_by_src_ip()
809 static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, in cma_ib_acquire_dev() argument
813 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_ib_acquire_dev()
819 id_priv->id.ps == RDMA_PS_IPOIB) in cma_ib_acquire_dev()
823 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_ib_acquire_dev()
831 gid_type, &gid, id_priv); in cma_ib_acquire_dev()
835 id_priv->id.port_num = req->port; in cma_ib_acquire_dev()
836 cma_bind_sgid_attr(id_priv, sgid_attr); in cma_ib_acquire_dev()
842 cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); in cma_ib_acquire_dev()
844 rdma_restrack_add(&id_priv->res); in cma_ib_acquire_dev()
848 static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, in cma_iw_acquire_dev() argument
851 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iw_acquire_dev()
860 id_priv->id.ps == RDMA_PS_IPOIB) in cma_iw_acquire_dev()
872 gid_type, &gid, id_priv); in cma_iw_acquire_dev()
874 id_priv->id.port_num = port; in cma_iw_acquire_dev()
875 cma_bind_sgid_attr(id_priv, sgid_attr); in cma_iw_acquire_dev()
888 gid_type, &gid, id_priv); in cma_iw_acquire_dev()
890 id_priv->id.port_num = port; in cma_iw_acquire_dev()
891 cma_bind_sgid_attr(id_priv, sgid_attr); in cma_iw_acquire_dev()
900 cma_attach_to_dev(id_priv, cma_dev); in cma_iw_acquire_dev()
901 rdma_restrack_add(&id_priv->res); in cma_iw_acquire_dev()
911 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) in cma_resolve_ib_dev() argument
923 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); in cma_resolve_ib_dev()
949 id_priv->id.port_num = p; in cma_resolve_ib_dev()
958 id_priv->id.port_num = p; in cma_resolve_ib_dev()
968 cma_attach_to_dev(id_priv, cma_dev); in cma_resolve_ib_dev()
969 rdma_restrack_add(&id_priv->res); in cma_resolve_ib_dev()
971 addr = (struct sockaddr_ib *)cma_src_addr(id_priv); in cma_resolve_ib_dev()
973 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); in cma_resolve_ib_dev()
977 static void cma_id_get(struct rdma_id_private *id_priv) in cma_id_get() argument
979 refcount_inc(&id_priv->refcount); in cma_id_get()
982 static void cma_id_put(struct rdma_id_private *id_priv) in cma_id_put() argument
984 if (refcount_dec_and_test(&id_priv->refcount)) in cma_id_put()
985 complete(&id_priv->comp); in cma_id_put()
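
cma_id_get()/cma_id_put() (lines 977-985) define the id lifetime: the final put fires the completion that _destroy_id() blocks on at line 2052. A minimal sketch of that rendezvous on the destroy path, assuming no holders beyond the creation reference:

    /* destroy side (cf. lines 2051-2052 and 2062 below) */
    cma_id_put(id_priv);                    /* drop the creation reference */
    wait_for_completion(&id_priv->comp);    /* released by the last put    */
    kfree(id_priv);                         /* no one can reach it now     */
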
993 struct rdma_id_private *id_priv; in __rdma_create_id() local
995 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); in __rdma_create_id()
996 if (!id_priv) in __rdma_create_id()
999 id_priv->state = RDMA_CM_IDLE; in __rdma_create_id()
1000 id_priv->id.context = context; in __rdma_create_id()
1001 id_priv->id.event_handler = event_handler; in __rdma_create_id()
1002 id_priv->id.ps = ps; in __rdma_create_id()
1003 id_priv->id.qp_type = qp_type; in __rdma_create_id()
1004 id_priv->tos_set = false; in __rdma_create_id()
1005 id_priv->timeout_set = false; in __rdma_create_id()
1006 id_priv->min_rnr_timer_set = false; in __rdma_create_id()
1007 id_priv->gid_type = IB_GID_TYPE_IB; in __rdma_create_id()
1008 spin_lock_init(&id_priv->lock); in __rdma_create_id()
1009 mutex_init(&id_priv->qp_mutex); in __rdma_create_id()
1010 init_completion(&id_priv->comp); in __rdma_create_id()
1011 refcount_set(&id_priv->refcount, 1); in __rdma_create_id()
1012 mutex_init(&id_priv->handler_mutex); in __rdma_create_id()
1013 INIT_LIST_HEAD(&id_priv->device_item); in __rdma_create_id()
1014 INIT_LIST_HEAD(&id_priv->id_list_entry); in __rdma_create_id()
1015 INIT_LIST_HEAD(&id_priv->listen_list); in __rdma_create_id()
1016 INIT_LIST_HEAD(&id_priv->mc_list); in __rdma_create_id()
1017 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); in __rdma_create_id()
1018 id_priv->id.route.addr.dev_addr.net = get_net(net); in __rdma_create_id()
1019 id_priv->seq_num &= 0x00ffffff; in __rdma_create_id()
1021 rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); in __rdma_create_id()
1023 rdma_restrack_parent_name(&id_priv->res, &parent->res); in __rdma_create_id()
1025 return id_priv; in __rdma_create_id()
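
Kernel consumers do not call __rdma_create_id() directly; they go through the rdma_create_id() wrapper from <rdma/rdma_cm.h>. A minimal hedged caller sketch; the handler name and the RDMA_PS_TCP/IB_QPT_RC choices are illustrative:

    #include <rdma/rdma_cm.h>

    /* illustrative handler; returning nonzero asks the CM to destroy the id */
    static int my_cm_handler(struct rdma_cm_id *id,
                             struct rdma_cm_event *event)
    {
        return 0;
    }

    struct rdma_cm_id *id;

    id = rdma_create_id(&init_net, my_cm_handler, NULL,
                        RDMA_PS_TCP, IB_QPT_RC);
    if (IS_ERR(id))
        return PTR_ERR(id);
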
1061 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) in cma_init_ud_qp() argument
1067 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_ud_qp()
1087 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) in cma_init_conn_qp() argument
1093 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_conn_qp()
1103 struct rdma_id_private *id_priv; in rdma_create_qp() local
1107 id_priv = container_of(id, struct rdma_id_private, id); in rdma_create_qp()
1121 ret = cma_init_ud_qp(id_priv, qp); in rdma_create_qp()
1123 ret = cma_init_conn_qp(id_priv, qp); in rdma_create_qp()
1128 id_priv->qp_num = qp->qp_num; in rdma_create_qp()
1129 id_priv->srq = (qp->srq != NULL); in rdma_create_qp()
1130 trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); in rdma_create_qp()
1135 trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); in rdma_create_qp()
1142 struct rdma_id_private *id_priv; in rdma_destroy_qp() local
1144 id_priv = container_of(id, struct rdma_id_private, id); in rdma_destroy_qp()
1145 trace_cm_qp_destroy(id_priv); in rdma_destroy_qp()
1146 mutex_lock(&id_priv->qp_mutex); in rdma_destroy_qp()
1147 ib_destroy_qp(id_priv->id.qp); in rdma_destroy_qp()
1148 id_priv->id.qp = NULL; in rdma_destroy_qp()
1149 mutex_unlock(&id_priv->qp_mutex); in rdma_destroy_qp()
1153 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, in cma_modify_qp_rtr() argument
1159 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
1160 if (!id_priv->id.qp) { in cma_modify_qp_rtr()
1167 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
1171 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
1176 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
1180 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); in cma_modify_qp_rtr()
1184 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
1186 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
1190 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, in cma_modify_qp_rts() argument
1196 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rts()
1197 if (!id_priv->id.qp) { in cma_modify_qp_rts()
1203 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rts()
1209 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rts()
1211 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rts()
1215 static int cma_modify_qp_err(struct rdma_id_private *id_priv) in cma_modify_qp_err() argument
1220 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_err()
1221 if (!id_priv->id.qp) { in cma_modify_qp_err()
1227 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); in cma_modify_qp_err()
1229 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_err()
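
cma_modify_qp_rtr(), _rts() and _err() (lines 1153-1229) share one skeleton: serialize on qp_mutex, treat a missing QP as success, then apply the transition. A condensed sketch; the INIT pre-step in _rtr() is elided, and _err() sets qp_state directly instead of calling rdma_init_qp_attr():

    mutex_lock(&id_priv->qp_mutex);
    if (!id_priv->id.qp) {
        ret = 0;                /* id owns no QP: nothing to transition */
        goto out;
    }
    qp_attr.qp_state = new_state;   /* IB_QPS_RTR, IB_QPS_RTS or IB_QPS_ERR */
    ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
    if (!ret)
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
    out:
    mutex_unlock(&id_priv->qp_mutex);
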
1233 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, in cma_ib_init_qp_attr() argument
1236 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_ib_init_qp_attr()
1240 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) in cma_ib_init_qp_attr()
1245 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, in cma_ib_init_qp_attr()
1250 qp_attr->port_num = id_priv->id.port_num; in cma_ib_init_qp_attr()
1253 if (id_priv->id.qp_type == IB_QPT_UD) { in cma_ib_init_qp_attr()
1254 ret = cma_set_default_qkey(id_priv); in cma_ib_init_qp_attr()
1258 qp_attr->qkey = id_priv->qkey; in cma_ib_init_qp_attr()
1270 struct rdma_id_private *id_priv; in rdma_init_qp_attr() local
1273 id_priv = container_of(id, struct rdma_id_private, id); in rdma_init_qp_attr()
1275 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
1276 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); in rdma_init_qp_attr()
1278 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
1282 qp_attr->rq_psn = id_priv->seq_num; in rdma_init_qp_attr()
1284 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
1288 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1290 qp_attr->port_num = id_priv->id.port_num; in rdma_init_qp_attr()
1296 if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) in rdma_init_qp_attr()
1297 qp_attr->timeout = id_priv->timeout; in rdma_init_qp_attr()
1299 if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) in rdma_init_qp_attr()
1300 qp_attr->min_rnr_timer = id_priv->min_rnr_timer; in rdma_init_qp_attr()
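
The same entry point is exported for ULPs that manage their own QP instead of using rdma_create_qp(): set the target qp_state, let rdma_init_qp_attr() fill in the transport-specific attributes (including the timeout and min_rnr_timer overrides above), then apply them. Hedged usage sketch, where qp is the caller's own QP:

    struct ib_qp_attr qp_attr = {};
    int qp_attr_mask, ret;

    qp_attr.qp_state = IB_QPS_RTR;
    ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
    if (!ret)
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
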
1703 static bool cma_match_private_data(struct rdma_id_private *id_priv, in cma_match_private_data() argument
1706 struct sockaddr *addr = cma_src_addr(id_priv); in cma_match_private_data()
1710 if (cma_any_addr(addr) && !id_priv->afonly) in cma_match_private_data()
1794 struct rdma_id_private *id_priv, *id_priv_dev; in cma_find_listener() local
1801 hlist_for_each_entry(id_priv, &bind_list->owners, node) { in cma_find_listener()
1802 if (cma_match_private_data(id_priv, ib_event->private_data)) { in cma_find_listener()
1803 if (id_priv->id.device == cm_id->device && in cma_find_listener()
1804 cma_match_net_dev(&id_priv->id, net_dev, req)) in cma_find_listener()
1805 return id_priv; in cma_find_listener()
1807 &id_priv->listen_list, in cma_find_listener()
1827 struct rdma_id_private *id_priv; in cma_ib_id_from_event() local
1868 id_priv = ERR_PTR(-EHOSTUNREACH); in cma_ib_id_from_event()
1875 id_priv = ERR_PTR(-EHOSTUNREACH); in cma_ib_id_from_event()
1883 id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); in cma_ib_id_from_event()
1887 if (IS_ERR(id_priv) && *net_dev) { in cma_ib_id_from_event()
1891 return id_priv; in cma_ib_id_from_event()
1894 static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) in cma_user_data_offset() argument
1896 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); in cma_user_data_offset()
1899 static void cma_cancel_route(struct rdma_id_private *id_priv) in cma_cancel_route() argument
1901 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { in cma_cancel_route()
1902 if (id_priv->query) in cma_cancel_route()
1903 ib_sa_cancel_query(id_priv->query_id, id_priv->query); in cma_cancel_route()
1907 static void _cma_cancel_listens(struct rdma_id_private *id_priv) in _cma_cancel_listens() argument
1917 list_del_init(&id_priv->listen_any_item); in _cma_cancel_listens()
1919 while (!list_empty(&id_priv->listen_list)) { in _cma_cancel_listens()
1921 list_first_entry(&id_priv->listen_list, in _cma_cancel_listens()
1933 static void cma_cancel_listens(struct rdma_id_private *id_priv) in cma_cancel_listens() argument
1936 _cma_cancel_listens(id_priv); in cma_cancel_listens()
1940 static void cma_cancel_operation(struct rdma_id_private *id_priv, in cma_cancel_operation() argument
1953 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); in cma_cancel_operation()
1956 cma_cancel_route(id_priv); in cma_cancel_operation()
1959 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) in cma_cancel_operation()
1960 cma_cancel_listens(id_priv); in cma_cancel_operation()
1967 static void cma_release_port(struct rdma_id_private *id_priv) in cma_release_port() argument
1969 struct rdma_bind_list *bind_list = id_priv->bind_list; in cma_release_port()
1970 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_release_port()
1976 hlist_del(&id_priv->node); in cma_release_port()
1984 static void destroy_mc(struct rdma_id_private *id_priv, in destroy_mc() argument
1989 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) in destroy_mc()
1992 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { in destroy_mc()
1994 &id_priv->id.route.addr.dev_addr; in destroy_mc()
2004 gid_type = id_priv->cma_dev->default_gid_type in destroy_mc()
2005 [id_priv->id.port_num - in destroy_mc()
2007 id_priv->cma_dev->device)]; in destroy_mc()
2019 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) in cma_leave_mc_groups() argument
2023 while (!list_empty(&id_priv->mc_list)) { in cma_leave_mc_groups()
2024 mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, in cma_leave_mc_groups()
2027 destroy_mc(id_priv, mc); in cma_leave_mc_groups()
2031 static void _destroy_id(struct rdma_id_private *id_priv, in _destroy_id() argument
2034 cma_cancel_operation(id_priv, state); in _destroy_id()
2036 rdma_restrack_del(&id_priv->res); in _destroy_id()
2037 cma_remove_id_from_tree(id_priv); in _destroy_id()
2038 if (id_priv->cma_dev) { in _destroy_id()
2039 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { in _destroy_id()
2040 if (id_priv->cm_id.ib) in _destroy_id()
2041 ib_destroy_cm_id(id_priv->cm_id.ib); in _destroy_id()
2042 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { in _destroy_id()
2043 if (id_priv->cm_id.iw) in _destroy_id()
2044 iw_destroy_cm_id(id_priv->cm_id.iw); in _destroy_id()
2046 cma_leave_mc_groups(id_priv); in _destroy_id()
2047 cma_release_dev(id_priv); in _destroy_id()
2050 cma_release_port(id_priv); in _destroy_id()
2051 cma_id_put(id_priv); in _destroy_id()
2052 wait_for_completion(&id_priv->comp); in _destroy_id()
2054 if (id_priv->internal_id) in _destroy_id()
2055 cma_id_put(id_priv->id.context); in _destroy_id()
2057 kfree(id_priv->id.route.path_rec); in _destroy_id()
2058 kfree(id_priv->id.route.path_rec_inbound); in _destroy_id()
2059 kfree(id_priv->id.route.path_rec_outbound); in _destroy_id()
2061 put_net(id_priv->id.route.addr.dev_addr.net); in _destroy_id()
2062 kfree(id_priv); in _destroy_id()
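
_destroy_id() (lines 2031-2062) releases resources in a strict order; summarized as a sketch reconstructed from the lines above:

    /*
     * Teardown order:
     *  1. cma_cancel_operation()  - cancel state-specific async work
     *  2. restrack + id-tree removal
     *  3. ib_destroy_cm_id() / iw_destroy_cm_id(), per transport
     *  4. cma_leave_mc_groups(), cma_release_dev()
     *  5. cma_release_port()
     *  6. cma_id_put() + wait_for_completion() - drain other holders
     *  7. free path records, put the netns, kfree(id_priv)
     */
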
2069 static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) in destroy_id_handler_unlock() argument
2075 trace_cm_id_destroy(id_priv); in destroy_id_handler_unlock()
2083 lockdep_assert_held(&id_priv->handler_mutex); in destroy_id_handler_unlock()
2084 spin_lock_irqsave(&id_priv->lock, flags); in destroy_id_handler_unlock()
2085 state = id_priv->state; in destroy_id_handler_unlock()
2086 id_priv->state = RDMA_CM_DESTROYING; in destroy_id_handler_unlock()
2087 spin_unlock_irqrestore(&id_priv->lock, flags); in destroy_id_handler_unlock()
2088 mutex_unlock(&id_priv->handler_mutex); in destroy_id_handler_unlock()
2089 _destroy_id(id_priv, state); in destroy_id_handler_unlock()
2094 struct rdma_id_private *id_priv = in rdma_destroy_id() local
2097 mutex_lock(&id_priv->handler_mutex); in rdma_destroy_id()
2098 destroy_id_handler_unlock(id_priv); in rdma_destroy_id()
2102 static int cma_rep_recv(struct rdma_id_private *id_priv) in cma_rep_recv() argument
2106 ret = cma_modify_qp_rtr(id_priv, NULL); in cma_rep_recv()
2110 ret = cma_modify_qp_rts(id_priv, NULL); in cma_rep_recv()
2114 trace_cm_send_rtu(id_priv); in cma_rep_recv()
2115 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); in cma_rep_recv()
2122 cma_modify_qp_err(id_priv); in cma_rep_recv()
2123 trace_cm_send_rej(id_priv); in cma_rep_recv()
2124 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, in cma_rep_recv()
2146 static int cma_cm_event_handler(struct rdma_id_private *id_priv, in cma_cm_event_handler() argument
2151 lockdep_assert_held(&id_priv->handler_mutex); in cma_cm_event_handler()
2153 trace_cm_event_handler(id_priv, event); in cma_cm_event_handler()
2154 ret = id_priv->id.event_handler(&id_priv->id, event); in cma_cm_event_handler()
2155 trace_cm_event_done(id_priv, event, ret); in cma_cm_event_handler()
2162 struct rdma_id_private *id_priv = cm_id->context; in cma_ib_handler() local
2167 mutex_lock(&id_priv->handler_mutex); in cma_ib_handler()
2168 state = READ_ONCE(id_priv->state); in cma_ib_handler()
2183 (id_priv->id.qp_type != IB_QPT_UD)) { in cma_ib_handler()
2184 trace_cm_send_mra(id_priv); in cma_ib_handler()
2187 if (id_priv->id.qp) { in cma_ib_handler()
2188 event.status = cma_rep_recv(id_priv); in cma_ib_handler()
2206 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, in cma_ib_handler()
2218 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, in cma_ib_handler()
2220 cma_modify_qp_err(id_priv); in cma_ib_handler()
2232 ret = cma_cm_event_handler(id_priv, &event); in cma_ib_handler()
2235 id_priv->cm_id.ib = NULL; in cma_ib_handler()
2236 destroy_id_handler_unlock(id_priv); in cma_ib_handler()
2240 mutex_unlock(&id_priv->handler_mutex); in cma_ib_handler()
2250 struct rdma_id_private *id_priv; in cma_ib_new_conn_id() local
2260 id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, in cma_ib_new_conn_id()
2265 if (IS_ERR(id_priv)) in cma_ib_new_conn_id()
2268 id = &id_priv->id; in cma_ib_new_conn_id()
2289 cma_any_addr(cma_src_addr(id_priv))) { in cma_ib_new_conn_id()
2293 } else if (!cma_any_addr(cma_src_addr(id_priv))) { in cma_ib_new_conn_id()
2294 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); in cma_ib_new_conn_id()
2301 id_priv->state = RDMA_CM_CONNECT; in cma_ib_new_conn_id()
2302 return id_priv; in cma_ib_new_conn_id()
2315 struct rdma_id_private *id_priv; in cma_ib_new_udp_id() local
2322 id_priv = __rdma_create_id(net, listen_id->event_handler, in cma_ib_new_udp_id()
2325 if (IS_ERR(id_priv)) in cma_ib_new_udp_id()
2328 id = &id_priv->id; in cma_ib_new_udp_id()
2338 if (!cma_any_addr(cma_src_addr(id_priv))) { in cma_ib_new_udp_id()
2339 ret = cma_translate_addr(cma_src_addr(id_priv), in cma_ib_new_udp_id()
2346 id_priv->state = RDMA_CM_CONNECT; in cma_ib_new_udp_id()
2347 return id_priv; in cma_ib_new_udp_id()
2498 struct rdma_id_private *id_priv = iw_id->context; in cma_iw_handler() local
2504 mutex_lock(&id_priv->handler_mutex); in cma_iw_handler()
2505 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in cma_iw_handler()
2513 memcpy(cma_src_addr(id_priv), laddr, in cma_iw_handler()
2515 memcpy(cma_dst_addr(id_priv), raddr, in cma_iw_handler()
2547 ret = cma_cm_event_handler(id_priv, &event); in cma_iw_handler()
2550 id_priv->cm_id.iw = NULL; in cma_iw_handler()
2551 destroy_id_handler_unlock(id_priv); in cma_iw_handler()
2556 mutex_unlock(&id_priv->handler_mutex); in cma_iw_handler()
2630 static int cma_ib_listen(struct rdma_id_private *id_priv) in cma_ib_listen() argument
2636 addr = cma_src_addr(id_priv); in cma_ib_listen()
2637 svc_id = rdma_get_service_id(&id_priv->id, addr); in cma_ib_listen()
2638 id = ib_cm_insert_listen(id_priv->id.device, in cma_ib_listen()
2642 id_priv->cm_id.ib = id; in cma_ib_listen()
2647 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) in cma_iw_listen() argument
2652 id = iw_create_cm_id(id_priv->id.device, in cma_iw_listen()
2654 id_priv); in cma_iw_listen()
2658 mutex_lock(&id_priv->qp_mutex); in cma_iw_listen()
2659 id->tos = id_priv->tos; in cma_iw_listen()
2660 id->tos_set = id_priv->tos_set; in cma_iw_listen()
2661 mutex_unlock(&id_priv->qp_mutex); in cma_iw_listen()
2662 id->afonly = id_priv->afonly; in cma_iw_listen()
2663 id_priv->cm_id.iw = id; in cma_iw_listen()
2665 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), in cma_iw_listen()
2666 rdma_addr_size(cma_src_addr(id_priv))); in cma_iw_listen()
2668 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); in cma_iw_listen()
2671 iw_destroy_cm_id(id_priv->cm_id.iw); in cma_iw_listen()
2672 id_priv->cm_id.iw = NULL; in cma_iw_listen()
2681 struct rdma_id_private *id_priv = id->context; in cma_listen_handler() local
2687 id->context = id_priv->id.context; in cma_listen_handler()
2688 id->event_handler = id_priv->id.event_handler; in cma_listen_handler()
2689 trace_cm_event_handler(id_priv, event); in cma_listen_handler()
2690 return id_priv->id.event_handler(id, event); in cma_listen_handler()
2693 static int cma_listen_on_dev(struct rdma_id_private *id_priv, in cma_listen_on_dev() argument
2698 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_listen_on_dev()
2704 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) in cma_listen_on_dev()
2708 __rdma_create_id(net, cma_listen_handler, id_priv, in cma_listen_on_dev()
2709 id_priv->id.ps, id_priv->id.qp_type, id_priv); in cma_listen_on_dev()
2714 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), in cma_listen_on_dev()
2715 rdma_addr_size(cma_src_addr(id_priv))); in cma_listen_on_dev()
2719 cma_id_get(id_priv); in cma_listen_on_dev()
2721 dev_id_priv->afonly = id_priv->afonly; in cma_listen_on_dev()
2722 mutex_lock(&id_priv->qp_mutex); in cma_listen_on_dev()
2723 dev_id_priv->tos_set = id_priv->tos_set; in cma_listen_on_dev()
2724 dev_id_priv->tos = id_priv->tos; in cma_listen_on_dev()
2725 mutex_unlock(&id_priv->qp_mutex); in cma_listen_on_dev()
2727 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); in cma_listen_on_dev()
2730 list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); in cma_listen_on_dev()
2739 static int cma_listen_on_all(struct rdma_id_private *id_priv) in cma_listen_on_all() argument
2746 list_add_tail(&id_priv->listen_any_item, &listen_any_list); in cma_listen_on_all()
2748 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); in cma_listen_on_all()
2760 _cma_cancel_listens(id_priv); in cma_listen_on_all()
2769 struct rdma_id_private *id_priv; in rdma_set_service_type() local
2771 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_service_type()
2772 mutex_lock(&id_priv->qp_mutex); in rdma_set_service_type()
2773 id_priv->tos = (u8) tos; in rdma_set_service_type()
2774 id_priv->tos_set = true; in rdma_set_service_type()
2775 mutex_unlock(&id_priv->qp_mutex); in rdma_set_service_type()
2796 struct rdma_id_private *id_priv; in rdma_set_ack_timeout() local
2801 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_ack_timeout()
2802 mutex_lock(&id_priv->qp_mutex); in rdma_set_ack_timeout()
2803 id_priv->timeout = timeout; in rdma_set_ack_timeout()
2804 id_priv->timeout_set = true; in rdma_set_ack_timeout()
2805 mutex_unlock(&id_priv->qp_mutex); in rdma_set_ack_timeout()
2831 struct rdma_id_private *id_priv; in rdma_set_min_rnr_timer() local
2840 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_min_rnr_timer()
2841 mutex_lock(&id_priv->qp_mutex); in rdma_set_min_rnr_timer()
2842 id_priv->min_rnr_timer = min_rnr_timer; in rdma_set_min_rnr_timer()
2843 id_priv->min_rnr_timer_set = true; in rdma_set_min_rnr_timer()
2844 mutex_unlock(&id_priv->qp_mutex); in rdma_set_min_rnr_timer()
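
All three setters take qp_mutex only so the value and its *_set flag update atomically. A hedged usage example on an id that is not yet connected; values are illustrative. rdma_set_ack_timeout() uses the IBTA 4.096 us * 2^n encoding, and the min RNR timer uses its own 5-bit table:

    rdma_set_service_type(id, 32);          /* ToS/traffic class for the flow */
    ret = rdma_set_ack_timeout(id, 14);     /* ~67 ms ACK timeout (2^14)      */
    if (!ret)
        ret = rdma_set_min_rnr_timer(id, 12);   /* encoded min RNR NAK timer  */
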
2923 static int cma_query_ib_route(struct rdma_id_private *id_priv, in cma_query_ib_route() argument
2926 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_query_ib_route()
2934 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) in cma_query_ib_route()
2943 path_rec.service_id = rdma_get_service_id(&id_priv->id, in cma_query_ib_route()
2944 cma_dst_addr(id_priv)); in cma_query_ib_route()
2950 switch (cma_family(id_priv)) { in cma_query_ib_route()
2952 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); in cma_query_ib_route()
2956 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); in cma_query_ib_route()
2961 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_query_ib_route()
2967 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, in cma_query_ib_route()
2968 id_priv->id.port_num, &path_rec, in cma_query_ib_route()
2971 work, &id_priv->query); in cma_query_ib_route()
2973 return (id_priv->query_id < 0) ? id_priv->query_id : 0; in cma_query_ib_route()
2981 struct rdma_id_private *id_priv = mc->id_priv; in cma_iboe_join_work_handler() local
2984 mutex_lock(&id_priv->handler_mutex); in cma_iboe_join_work_handler()
2985 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || in cma_iboe_join_work_handler()
2986 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) in cma_iboe_join_work_handler()
2989 ret = cma_cm_event_handler(id_priv, event); in cma_iboe_join_work_handler()
2993 mutex_unlock(&id_priv->handler_mutex); in cma_iboe_join_work_handler()
3001 struct rdma_id_private *id_priv = work->id; in cma_work_handler() local
3003 mutex_lock(&id_priv->handler_mutex); in cma_work_handler()
3004 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || in cma_work_handler()
3005 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) in cma_work_handler()
3008 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) in cma_work_handler()
3012 if (cma_cm_event_handler(id_priv, &work->event)) { in cma_work_handler()
3013 cma_id_put(id_priv); in cma_work_handler()
3014 destroy_id_handler_unlock(id_priv); in cma_work_handler()
3019 mutex_unlock(&id_priv->handler_mutex); in cma_work_handler()
3020 cma_id_put(id_priv); in cma_work_handler()
3028 struct rdma_id_private *id_priv) in cma_init_resolve_route_work() argument
3030 work->id = id_priv; in cma_init_resolve_route_work()
3038 struct rdma_id_private *id_priv) in enqueue_resolve_addr_work() argument
3041 cma_id_get(id_priv); in enqueue_resolve_addr_work()
3043 work->id = id_priv; in enqueue_resolve_addr_work()
3052 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, in cma_resolve_ib_route() argument
3055 struct rdma_route *route = &id_priv->id.route; in cma_resolve_ib_route()
3063 cma_init_resolve_route_work(work, id_priv); in cma_resolve_ib_route()
3072 ret = cma_query_ib_route(id_priv, timeout_ms, work); in cma_resolve_ib_route()
3105 cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) in cma_iboe_set_path_rec_l2_fields() argument
3107 struct rdma_route *route = &id_priv->id.route; in cma_iboe_set_path_rec_l2_fields()
3121 supported_gids = roce_gid_type_mask_support(id_priv->id.device, in cma_iboe_set_path_rec_l2_fields()
3122 id_priv->id.port_num); in cma_iboe_set_path_rec_l2_fields()
3125 id_priv->gid_type); in cma_iboe_set_path_rec_l2_fields()
3139 struct rdma_id_private *id_priv; in rdma_set_ib_path() local
3143 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_ib_path()
3144 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, in rdma_set_ib_path()
3156 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); in rdma_set_ib_path()
3171 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); in rdma_set_ib_path()
3176 static int cma_resolve_iw_route(struct rdma_id_private *id_priv) in cma_resolve_iw_route() argument
3184 cma_init_resolve_route_work(work, id_priv); in cma_resolve_iw_route()
3253 static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) in cma_get_roce_udp_flow_label() argument
3259 addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); in cma_get_roce_udp_flow_label()
3261 if ((cma_family(id_priv) != AF_INET6) || !fl) { in cma_get_roce_udp_flow_label()
3262 dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); in cma_get_roce_udp_flow_label()
3263 sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); in cma_get_roce_udp_flow_label()
3271 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) in cma_resolve_iboe_route() argument
3273 struct rdma_route *route = &id_priv->id.route; in cma_resolve_iboe_route()
3279 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - in cma_resolve_iboe_route()
3280 rdma_start_port(id_priv->cma_dev->device)]; in cma_resolve_iboe_route()
3283 mutex_lock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3284 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; in cma_resolve_iboe_route()
3285 mutex_unlock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3299 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); in cma_resolve_iboe_route()
3305 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_resolve_iboe_route()
3307 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, in cma_resolve_iboe_route()
3310 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) in cma_resolve_iboe_route()
3332 mutex_lock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3333 if (id_priv->timeout_set && id_priv->timeout) in cma_resolve_iboe_route()
3334 route->path_rec->packet_life_time = id_priv->timeout - 1; in cma_resolve_iboe_route()
3337 mutex_unlock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3344 if (rdma_protocol_roce_udp_encap(id_priv->id.device, in cma_resolve_iboe_route()
3345 id_priv->id.port_num)) in cma_resolve_iboe_route()
3347 cma_get_roce_udp_flow_label(id_priv); in cma_resolve_iboe_route()
3349 cma_init_resolve_route_work(work, id_priv); in cma_resolve_iboe_route()
3365 struct rdma_id_private *id_priv; in rdma_resolve_route() local
3371 id_priv = container_of(id, struct rdma_id_private, id); in rdma_resolve_route()
3372 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) in rdma_resolve_route()
3375 cma_id_get(id_priv); in rdma_resolve_route()
3377 ret = cma_resolve_ib_route(id_priv, timeout_ms); in rdma_resolve_route()
3379 ret = cma_resolve_iboe_route(id_priv); in rdma_resolve_route()
3381 cma_add_id_to_tree(id_priv); in rdma_resolve_route()
3384 ret = cma_resolve_iw_route(id_priv); in rdma_resolve_route()
3393 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); in rdma_resolve_route()
3394 cma_id_put(id_priv); in rdma_resolve_route()
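
rdma_resolve_route() is step two of the active-side ladder: ADDR_RESOLVED -> ROUTE_QUERY -> ROUTE_RESOLVED, after which the caller may connect. A hedged sketch of the sequence as driven from a ULP, with event handling elsewhere:

    ret = rdma_resolve_addr(id, NULL, dst_addr, 2000 /* ms, illustrative */);
    /* ... handler sees RDMA_CM_EVENT_ADDR_RESOLVED ... */
    ret = rdma_resolve_route(id, 2000);
    /* ... handler sees RDMA_CM_EVENT_ROUTE_RESOLVED, then rdma_connect() ... */
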
3416 static int cma_bind_loopback(struct rdma_id_private *id_priv) in cma_bind_loopback() argument
3428 if (cma_family(id_priv) == AF_IB && in cma_bind_loopback()
3460 id_priv->id.route.addr.dev_addr.dev_type = in cma_bind_loopback()
3464 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_bind_loopback()
3465 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); in cma_bind_loopback()
3466 id_priv->id.port_num = p; in cma_bind_loopback()
3467 cma_attach_to_dev(id_priv, cma_dev); in cma_bind_loopback()
3468 rdma_restrack_add(&id_priv->res); in cma_bind_loopback()
3469 cma_set_loopback(cma_src_addr(id_priv)); in cma_bind_loopback()
3478 struct rdma_id_private *id_priv = context; in addr_handler() local
3483 mutex_lock(&id_priv->handler_mutex); in addr_handler()
3484 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, in addr_handler()
3493 addr = cma_src_addr(id_priv); in addr_handler()
3496 if (!status && !id_priv->cma_dev) { in addr_handler()
3497 status = cma_acquire_dev_by_src_ip(id_priv); in addr_handler()
3501 rdma_restrack_add(&id_priv->res); in addr_handler()
3509 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, in addr_handler()
3517 if (cma_cm_event_handler(id_priv, &event)) { in addr_handler()
3518 destroy_id_handler_unlock(id_priv); in addr_handler()
3522 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
3525 static int cma_resolve_loopback(struct rdma_id_private *id_priv) in cma_resolve_loopback() argument
3535 if (!id_priv->cma_dev) { in cma_resolve_loopback()
3536 ret = cma_bind_loopback(id_priv); in cma_resolve_loopback()
3541 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
3542 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
3544 enqueue_resolve_addr_work(work, id_priv); in cma_resolve_loopback()
3551 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) in cma_resolve_ib_addr() argument
3560 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
3561 ret = cma_resolve_ib_dev(id_priv); in cma_resolve_ib_addr()
3566 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) in cma_resolve_ib_addr()
3567 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); in cma_resolve_ib_addr()
3569 enqueue_resolve_addr_work(work, id_priv); in cma_resolve_ib_addr()
3578 struct rdma_id_private *id_priv; in rdma_set_reuseaddr() local
3582 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_reuseaddr()
3583 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_reuseaddr()
3584 if ((reuse && id_priv->state != RDMA_CM_LISTEN) || in rdma_set_reuseaddr()
3585 id_priv->state == RDMA_CM_IDLE) { in rdma_set_reuseaddr()
3586 id_priv->reuseaddr = reuse; in rdma_set_reuseaddr()
3591 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_reuseaddr()
3598 struct rdma_id_private *id_priv; in rdma_set_afonly() local
3602 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_afonly()
3603 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_afonly()
3604 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { in rdma_set_afonly()
3605 id_priv->options |= (1 << CMA_OPTION_AFONLY); in rdma_set_afonly()
3606 id_priv->afonly = afonly; in rdma_set_afonly()
3611 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_afonly()
3617 struct rdma_id_private *id_priv) in cma_bind_port() argument
3626 addr = cma_src_addr(id_priv); in cma_bind_port()
3644 id_priv->bind_list = bind_list; in cma_bind_port()
3645 hlist_add_head(&id_priv->node, &bind_list->owners); in cma_bind_port()
3649 struct rdma_id_private *id_priv, unsigned short snum) in cma_alloc_port() argument
3660 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, in cma_alloc_port()
3667 cma_bind_port(bind_list, id_priv); in cma_alloc_port()
3675 struct rdma_id_private *id_priv) in cma_port_is_unique() argument
3678 struct sockaddr *daddr = cma_dst_addr(id_priv); in cma_port_is_unique()
3679 struct sockaddr *saddr = cma_src_addr(id_priv); in cma_port_is_unique()
3689 if (id_priv == cur_id) in cma_port_is_unique()
3716 struct rdma_id_private *id_priv) in cma_alloc_any_port() argument
3721 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_alloc_any_port()
3736 ret = cma_alloc_port(ps, id_priv, rover); in cma_alloc_any_port()
3738 ret = cma_port_is_unique(bind_list, id_priv); in cma_alloc_any_port()
3740 cma_bind_port(bind_list, id_priv); in cma_alloc_any_port()
3767 struct rdma_id_private *id_priv, uint8_t reuseaddr) in cma_check_port() argument
3774 addr = cma_src_addr(id_priv); in cma_check_port()
3776 if (id_priv == cur_id) in cma_check_port()
3783 if (id_priv->afonly && cur_id->afonly && in cma_check_port()
3797 struct rdma_id_private *id_priv) in cma_use_port() argument
3805 snum = ntohs(cma_port(cma_src_addr(id_priv))); in cma_use_port()
3809 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); in cma_use_port()
3811 ret = cma_alloc_port(ps, id_priv, snum); in cma_use_port()
3813 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); in cma_use_port()
3815 cma_bind_port(bind_list, id_priv); in cma_use_port()
3821 cma_select_inet_ps(struct rdma_id_private *id_priv) in cma_select_inet_ps() argument
3823 switch (id_priv->id.ps) { in cma_select_inet_ps()
3828 return id_priv->id.ps; in cma_select_inet_ps()
3836 cma_select_ib_ps(struct rdma_id_private *id_priv) in cma_select_ib_ps() argument
3842 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_select_ib_ps()
3846 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { in cma_select_ib_ps()
3849 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && in cma_select_ib_ps()
3853 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && in cma_select_ib_ps()
3867 static int cma_get_port(struct rdma_id_private *id_priv) in cma_get_port() argument
3872 if (cma_family(id_priv) != AF_IB) in cma_get_port()
3873 ps = cma_select_inet_ps(id_priv); in cma_get_port()
3875 ps = cma_select_ib_ps(id_priv); in cma_get_port()
3880 if (cma_any_port(cma_src_addr(id_priv))) in cma_get_port()
3881 ret = cma_alloc_any_port(ps, id_priv); in cma_get_port()
3883 ret = cma_use_port(ps, id_priv); in cma_get_port()
3913 struct rdma_id_private *id_priv = in rdma_listen() local
3917 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { in rdma_listen()
3927 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, in rdma_listen()
3936 if (id_priv->reuseaddr) { in rdma_listen()
3938 ret = cma_check_port(id_priv->bind_list, id_priv, 0); in rdma_listen()
3940 id_priv->reuseaddr = 0; in rdma_listen()
3946 id_priv->backlog = backlog; in rdma_listen()
3947 if (id_priv->cma_dev) { in rdma_listen()
3949 ret = cma_ib_listen(id_priv); in rdma_listen()
3953 ret = cma_iw_listen(id_priv, backlog); in rdma_listen()
3961 ret = cma_listen_on_all(id_priv); in rdma_listen()
3968 id_priv->backlog = 0; in rdma_listen()
3973 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); in rdma_listen()
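
A passive-side ULP reaches this path by binding first; a hedged sketch with an illustrative wildcard IPv4 address and backlog:

    struct sockaddr_in sin = {
        .sin_family = AF_INET,
        .sin_addr.s_addr = htonl(INADDR_ANY),
        .sin_port = htons(7174),    /* illustrative port */
    };

    ret = rdma_bind_addr(id, (struct sockaddr *)&sin);
    if (!ret)
        ret = rdma_listen(id, 16);  /* illustrative backlog */
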
3978 static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, in rdma_bind_addr_dst() argument
3988 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) in rdma_bind_addr_dst()
3991 ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); in rdma_bind_addr_dst()
3995 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); in rdma_bind_addr_dst()
3997 ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); in rdma_bind_addr_dst()
4001 ret = cma_acquire_dev_by_src_ip(id_priv); in rdma_bind_addr_dst()
4006 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { in rdma_bind_addr_dst()
4008 id_priv->afonly = 1; in rdma_bind_addr_dst()
4011 struct net *net = id_priv->id.route.addr.dev_addr.net; in rdma_bind_addr_dst()
4013 id_priv->afonly = net->ipv6.sysctl.bindv6only; in rdma_bind_addr_dst()
4017 id_daddr = cma_dst_addr(id_priv); in rdma_bind_addr_dst()
4022 ret = cma_get_port(id_priv); in rdma_bind_addr_dst()
4027 rdma_restrack_add(&id_priv->res); in rdma_bind_addr_dst()
4030 if (id_priv->cma_dev) in rdma_bind_addr_dst()
4031 cma_release_dev(id_priv); in rdma_bind_addr_dst()
4033 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); in rdma_bind_addr_dst()
4040 struct rdma_id_private *id_priv = in cma_bind_addr() local
4045 return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); in cma_bind_addr()
4065 return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr); in cma_bind_addr()
4074 static int resolve_prepare_src(struct rdma_id_private *id_priv, in resolve_prepare_src() argument
4080 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { in resolve_prepare_src()
4082 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); in resolve_prepare_src()
4085 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, in resolve_prepare_src()
4090 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); in resolve_prepare_src()
4093 if (cma_family(id_priv) != dst_addr->sa_family) { in resolve_prepare_src()
4100 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); in resolve_prepare_src()
4107 struct rdma_id_private *id_priv = in rdma_resolve_addr() local
4111 ret = resolve_prepare_src(id_priv, src_addr, dst_addr); in rdma_resolve_addr()
4116 ret = cma_resolve_loopback(id_priv); in rdma_resolve_addr()
4119 ret = cma_resolve_ib_addr(id_priv); in rdma_resolve_addr()
4132 if (id_priv->used_resolve_ip) in rdma_resolve_addr()
4135 id_priv->used_resolve_ip = 1; in rdma_resolve_addr()
4136 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, in rdma_resolve_addr()
4139 false, id_priv); in rdma_resolve_addr()
4147 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); in rdma_resolve_addr()
4154 struct rdma_id_private *id_priv = in rdma_bind_addr() local
4157 return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); in rdma_bind_addr()
4161 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) in cma_format_hdr() argument
4167 if (cma_family(id_priv) == AF_INET) { in cma_format_hdr()
4170 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); in cma_format_hdr()
4171 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); in cma_format_hdr()
4177 } else if (cma_family(id_priv) == AF_INET6) { in cma_format_hdr()
4180 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); in cma_format_hdr()
4181 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); in cma_format_hdr()
4194 struct rdma_id_private *id_priv = cm_id->context; in cma_sidr_rep_handler() local
4200 mutex_lock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
4201 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in cma_sidr_rep_handler()
4219 ret = cma_set_qkey(id_priv, rep->qkey); in cma_sidr_rep_handler()
4226 ib_init_ah_attr_from_path(id_priv->id.device, in cma_sidr_rep_handler()
4227 id_priv->id.port_num, in cma_sidr_rep_handler()
4228 id_priv->id.route.path_rec, in cma_sidr_rep_handler()
4242 ret = cma_cm_event_handler(id_priv, &event); in cma_sidr_rep_handler()
4247 id_priv->cm_id.ib = NULL; in cma_sidr_rep_handler()
4248 destroy_id_handler_unlock(id_priv); in cma_sidr_rep_handler()
4252 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
4256 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, in cma_resolve_ib_udp() argument
4266 offset = cma_user_data_offset(id_priv); in cma_resolve_ib_udp()
4283 ret = cma_format_hdr(private_data, id_priv); in cma_resolve_ib_udp()
4289 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, in cma_resolve_ib_udp()
4290 id_priv); in cma_resolve_ib_udp()
4295 id_priv->cm_id.ib = id; in cma_resolve_ib_udp()
4297 req.path = id_priv->id.route.path_rec; in cma_resolve_ib_udp()
4298 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_resolve_ib_udp()
4299 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_resolve_ib_udp()
4303 trace_cm_send_sidr_req(id_priv); in cma_resolve_ib_udp()
4304 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); in cma_resolve_ib_udp()
4306 ib_destroy_cm_id(id_priv->cm_id.ib); in cma_resolve_ib_udp()
4307 id_priv->cm_id.ib = NULL; in cma_resolve_ib_udp()
4314 static int cma_connect_ib(struct rdma_id_private *id_priv, in cma_connect_ib() argument
4325 offset = cma_user_data_offset(id_priv); in cma_connect_ib()
4341 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); in cma_connect_ib()
4346 id_priv->cm_id.ib = id; in cma_connect_ib()
4348 route = &id_priv->id.route; in cma_connect_ib()
4350 ret = cma_format_hdr(private_data, id_priv); in cma_connect_ib()
4362 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_connect_ib()
4364 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_connect_ib()
4365 req.qp_num = id_priv->qp_num; in cma_connect_ib()
4366 req.qp_type = id_priv->id.qp_type; in cma_connect_ib()
4367 req.starting_psn = id_priv->seq_num; in cma_connect_ib()
4376 req.srq = id_priv->srq ? 1 : 0; in cma_connect_ib()
4377 req.ece.vendor_id = id_priv->ece.vendor_id; in cma_connect_ib()
4378 req.ece.attr_mod = id_priv->ece.attr_mod; in cma_connect_ib()
4380 trace_cm_send_req(id_priv); in cma_connect_ib()
4381 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); in cma_connect_ib()
4385 id_priv->cm_id.ib = NULL; in cma_connect_ib()
4392 static int cma_connect_iw(struct rdma_id_private *id_priv, in cma_connect_iw() argument
4399 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); in cma_connect_iw()
4403 mutex_lock(&id_priv->qp_mutex); in cma_connect_iw()
4404 cm_id->tos = id_priv->tos; in cma_connect_iw()
4405 cm_id->tos_set = id_priv->tos_set; in cma_connect_iw()
4406 mutex_unlock(&id_priv->qp_mutex); in cma_connect_iw()
4408 id_priv->cm_id.iw = cm_id; in cma_connect_iw()
4410 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), in cma_connect_iw()
4411 rdma_addr_size(cma_src_addr(id_priv))); in cma_connect_iw()
4412 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), in cma_connect_iw()
4413 rdma_addr_size(cma_dst_addr(id_priv))); in cma_connect_iw()
4415 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_connect_iw()
4424 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
4427 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
4433 id_priv->cm_id.iw = NULL; in cma_connect_iw()
4449 struct rdma_id_private *id_priv = in rdma_connect_locked() local
4453 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) in rdma_connect_locked()
4457 id_priv->qp_num = conn_param->qp_num; in rdma_connect_locked()
4458 id_priv->srq = conn_param->srq; in rdma_connect_locked()
4463 ret = cma_resolve_ib_udp(id_priv, conn_param); in rdma_connect_locked()
4465 ret = cma_connect_ib(id_priv, conn_param); in rdma_connect_locked()
4467 ret = cma_connect_iw(id_priv, conn_param); in rdma_connect_locked()
4475 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); in rdma_connect_locked()
4494 struct rdma_id_private *id_priv = in rdma_connect() local
4498 mutex_lock(&id_priv->handler_mutex); in rdma_connect()
4500 mutex_unlock(&id_priv->handler_mutex); in rdma_connect()
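
rdma_connect() only wraps rdma_connect_locked() in the handler mutex; from a ULP the call looks like the following hedged sketch, with illustrative rdma_conn_param values (rnr_retry_count of 7 means retry indefinitely):

    struct rdma_conn_param param = {
        .responder_resources = 1,
        .initiator_depth = 1,
        .retry_count = 7,
        .rnr_retry_count = 7,
    };

    ret = rdma_connect(id, &param);
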
4516 struct rdma_id_private *id_priv = in rdma_connect_ece() local
4519 id_priv->ece.vendor_id = ece->vendor_id; in rdma_connect_ece()
4520 id_priv->ece.attr_mod = ece->attr_mod; in rdma_connect_ece()
4526 static int cma_accept_ib(struct rdma_id_private *id_priv, in cma_accept_ib() argument
4532 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_accept_ib()
4536 ret = cma_modify_qp_rts(id_priv, conn_param); in cma_accept_ib()
4541 rep.qp_num = id_priv->qp_num; in cma_accept_ib()
4542 rep.starting_psn = id_priv->seq_num; in cma_accept_ib()
4550 rep.srq = id_priv->srq ? 1 : 0; in cma_accept_ib()
4551 rep.ece.vendor_id = id_priv->ece.vendor_id; in cma_accept_ib()
4552 rep.ece.attr_mod = id_priv->ece.attr_mod; in cma_accept_ib()
4554 trace_cm_send_rep(id_priv); in cma_accept_ib()
4555 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); in cma_accept_ib()
4560 static int cma_accept_iw(struct rdma_id_private *id_priv, in cma_accept_iw() argument
4569 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_accept_iw()
4577 if (id_priv->id.qp) in cma_accept_iw()
4578 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
4582 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); in cma_accept_iw()
4585 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, in cma_send_sidr_rep() argument
4596 ret = cma_set_qkey(id_priv, qkey); in cma_send_sidr_rep()
4598 ret = cma_set_default_qkey(id_priv); in cma_send_sidr_rep()
4601 rep.qp_num = id_priv->qp_num; in cma_send_sidr_rep()
4602 rep.qkey = id_priv->qkey; in cma_send_sidr_rep()
4604 rep.ece.vendor_id = id_priv->ece.vendor_id; in cma_send_sidr_rep()
4605 rep.ece.attr_mod = id_priv->ece.attr_mod; in cma_send_sidr_rep()
4611 trace_cm_send_sidr_rep(id_priv); in cma_send_sidr_rep()
4612 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); in cma_send_sidr_rep()
4635 struct rdma_id_private *id_priv = in rdma_accept() local
4639 lockdep_assert_held(&id_priv->handler_mutex); in rdma_accept()
4641 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in rdma_accept()
4645 id_priv->qp_num = conn_param->qp_num; in rdma_accept()
4646 id_priv->srq = conn_param->srq; in rdma_accept()
4652 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, in rdma_accept()
4657 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, in rdma_accept()
4661 ret = cma_accept_ib(id_priv, conn_param); in rdma_accept()
4663 ret = cma_rep_recv(id_priv); in rdma_accept()
4666 ret = cma_accept_iw(id_priv, conn_param); in rdma_accept()
4675 cma_modify_qp_err(id_priv); in rdma_accept()
4684 struct rdma_id_private *id_priv = in rdma_accept_ece() local
4687 id_priv->ece.vendor_id = ece->vendor_id; in rdma_accept_ece()
4688 id_priv->ece.attr_mod = ece->attr_mod; in rdma_accept_ece()
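
Per the lockdep assert at line 4639, rdma_accept() runs under the handler_mutex the CM already holds while delivering the connect request, so it is normally called from inside the event handler. Hedged sketch; new_id is the child id delivered with RDMA_CM_EVENT_CONNECT_REQUEST:

    /* in the event handler, event->event == RDMA_CM_EVENT_CONNECT_REQUEST */
    struct rdma_conn_param param = { .responder_resources = 1 };

    ret = rdma_accept(new_id, &param);
    if (ret)
        rdma_reject(new_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
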
4696 struct rdma_id_private *id_priv = in rdma_lock_handler() local
4699 mutex_lock(&id_priv->handler_mutex); in rdma_lock_handler()
4705 struct rdma_id_private *id_priv = in rdma_unlock_handler() local
4708 mutex_unlock(&id_priv->handler_mutex); in rdma_unlock_handler()
4714 struct rdma_id_private *id_priv; in rdma_notify() local
4717 id_priv = container_of(id, struct rdma_id_private, id); in rdma_notify()
4718 if (!id_priv->cm_id.ib) in rdma_notify()
4723 ret = ib_cm_notify(id_priv->cm_id.ib, event); in rdma_notify()
4736 struct rdma_id_private *id_priv; in rdma_reject() local
4739 id_priv = container_of(id, struct rdma_id_private, id); in rdma_reject()
4740 if (!id_priv->cm_id.ib) in rdma_reject()
4745 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, in rdma_reject()
4748 trace_cm_send_rej(id_priv); in rdma_reject()
4749 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, in rdma_reject()
4753 ret = iw_cm_reject(id_priv->cm_id.iw, in rdma_reject()
4765 struct rdma_id_private *id_priv; in rdma_disconnect() local
4768 id_priv = container_of(id, struct rdma_id_private, id); in rdma_disconnect()
4769 if (!id_priv->cm_id.ib) in rdma_disconnect()
4773 ret = cma_modify_qp_err(id_priv); in rdma_disconnect()
4777 trace_cm_disconnect(id_priv); in rdma_disconnect()
4778 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { in rdma_disconnect()
4779 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) in rdma_disconnect()
4780 trace_cm_sent_drep(id_priv); in rdma_disconnect()
4782 trace_cm_sent_dreq(id_priv); in rdma_disconnect()
4785 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); in rdma_disconnect()
4794 static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, in cma_make_mc_event() argument
4814 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_make_mc_event()
4817 id_priv->cma_dev in cma_make_mc_event()
4818 ->default_gid_type[id_priv->id.port_num - in cma_make_mc_event()
4820 id_priv->cma_dev->device)]; in cma_make_mc_event()
4823 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, in cma_make_mc_event()
4831 event->param.ud.qkey = id_priv->qkey; in cma_make_mc_event()
4840 struct rdma_id_private *id_priv = mc->id_priv; in cma_ib_mc_handler() local
4844 mutex_lock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4845 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || in cma_ib_mc_handler()
4846 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) in cma_ib_mc_handler()
4849 ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); in cma_ib_mc_handler()
4851 cma_make_mc_event(status, id_priv, multicast, &event, mc); in cma_ib_mc_handler()
4852 ret = cma_cm_event_handler(id_priv, &event); in cma_ib_mc_handler()
4858 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4862 static void cma_set_mgid(struct rdma_id_private *id_priv, in cma_set_mgid() argument
4866 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_set_mgid()
4881 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4886 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4892 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, in cma_join_ib_multicast() argument
4896 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_join_ib_multicast()
4901 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, in cma_join_ib_multicast()
4906 if (!id_priv->qkey) { in cma_join_ib_multicast()
4907 ret = cma_set_default_qkey(id_priv); in cma_join_ib_multicast()
4912 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); in cma_join_ib_multicast()
4913 rec.qkey = cpu_to_be32(id_priv->qkey); in cma_join_ib_multicast()
4924 if (id_priv->id.ps == RDMA_PS_IPOIB) in cma_join_ib_multicast()
4931 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, in cma_join_ib_multicast()
4932 id_priv->id.port_num, &rec, comp_mask, in cma_join_ib_multicast()
4966 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, in cma_iboe_join_multicast() argument
4969 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iboe_join_multicast()
4982 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - in cma_iboe_join_multicast()
4983 rdma_start_port(id_priv->cma_dev->device)]; in cma_iboe_join_multicast()
5012 if (!id_priv->qkey) in cma_iboe_join_multicast()
5013 cma_set_default_qkey(id_priv); in cma_iboe_join_multicast()
5015 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_iboe_join_multicast()
5018 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); in cma_iboe_join_multicast()
5026 struct rdma_id_private *id_priv = in rdma_join_multicast() local
5036 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && in rdma_join_multicast()
5037 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) in rdma_join_multicast()
5040 if (id_priv->id.qp_type != IB_QPT_UD) in rdma_join_multicast()
5049 mc->id_priv = id_priv; in rdma_join_multicast()
5053 ret = cma_iboe_join_multicast(id_priv, mc); in rdma_join_multicast()
5057 ret = cma_join_ib_multicast(id_priv, mc); in rdma_join_multicast()
5065 spin_lock(&id_priv->lock); in rdma_join_multicast()
5066 list_add(&mc->list, &id_priv->mc_list); in rdma_join_multicast()
5067 spin_unlock(&id_priv->lock); in rdma_join_multicast()
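
Joining requires a UD id in ADDR_BOUND or ADDR_RESOLVED state (lines 5036-5040). Hedged sketch, assuming the join-state constants from <rdma/ib_sa.h>; mcast_addr and ctx are caller-supplied placeholders:

    ret = rdma_join_multicast(id, (struct sockaddr *)&mcast_addr,
                              BIT(FULLMEMBER_JOIN), ctx);
    /* ... send/receive on the UD QP ... */
    rdma_leave_multicast(id, (struct sockaddr *)&mcast_addr);
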
5078 struct rdma_id_private *id_priv; in rdma_leave_multicast() local
5081 id_priv = container_of(id, struct rdma_id_private, id); in rdma_leave_multicast()
5082 spin_lock_irq(&id_priv->lock); in rdma_leave_multicast()
5083 list_for_each_entry(mc, &id_priv->mc_list, list) { in rdma_leave_multicast()
5087 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
5089 WARN_ON(id_priv->cma_dev->device != id->device); in rdma_leave_multicast()
5090 destroy_mc(id_priv, mc); in rdma_leave_multicast()
5093 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
5097 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) in cma_netdev_change() argument
5102 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_netdev_change()
5108 ndev->name, &id_priv->id); in cma_netdev_change()
5114 work->id = id_priv; in cma_netdev_change()
5116 cma_id_get(id_priv); in cma_netdev_change()
5128 struct rdma_id_private *id_priv; in cma_netdev_callback() local
5139 list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { in cma_netdev_callback()
5140 ret = cma_netdev_change(ndev, id_priv); in cma_netdev_callback()
5152 struct rdma_id_private *id_priv = in cma_netevent_work_handler() local
5156 mutex_lock(&id_priv->handler_mutex); in cma_netevent_work_handler()
5158 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || in cma_netevent_work_handler()
5159 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) in cma_netevent_work_handler()
5165 if (cma_cm_event_handler(id_priv, &event)) { in cma_netevent_work_handler()
5166 __acquire(&id_priv->handler_mutex); in cma_netevent_work_handler()
5167 id_priv->cm_id.ib = NULL; in cma_netevent_work_handler()
5168 cma_id_put(id_priv); in cma_netevent_work_handler()
5169 destroy_id_handler_unlock(id_priv); in cma_netevent_work_handler()
5174 mutex_unlock(&id_priv->handler_mutex); in cma_netevent_work_handler()
5175 cma_id_put(id_priv); in cma_netevent_work_handler()
5231 static void cma_send_device_removal_put(struct rdma_id_private *id_priv) in cma_send_device_removal_put() argument
5237 mutex_lock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5239 spin_lock_irqsave(&id_priv->lock, flags); in cma_send_device_removal_put()
5240 state = id_priv->state; in cma_send_device_removal_put()
5242 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_send_device_removal_put()
5243 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5244 cma_id_put(id_priv); in cma_send_device_removal_put()
5247 id_priv->state = RDMA_CM_DEVICE_REMOVAL; in cma_send_device_removal_put()
5248 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_send_device_removal_put()
5250 if (cma_cm_event_handler(id_priv, &event)) { in cma_send_device_removal_put()
5255 cma_id_put(id_priv); in cma_send_device_removal_put()
5256 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5257 trace_cm_id_destroy(id_priv); in cma_send_device_removal_put()
5258 _destroy_id(id_priv, state); in cma_send_device_removal_put()
5261 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5267 cma_cancel_operation(id_priv, state); in cma_send_device_removal_put()
5268 cma_id_put(id_priv); in cma_send_device_removal_put()
5275 struct rdma_id_private *id_priv = list_first_entry( in cma_process_remove() local
5278 list_del_init(&id_priv->listen_item); in cma_process_remove()
5279 list_del_init(&id_priv->device_item); in cma_process_remove()
5280 cma_id_get(id_priv); in cma_process_remove()
5283 cma_send_device_removal_put(id_priv); in cma_process_remove()
5308 struct rdma_id_private *id_priv; in cma_add_one() local
5356 list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { in cma_add_one()
5357 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); in cma_add_one()