Lines matching refs: packet (references to struct ib_umad_packet in drivers/infiniband/core/user_mad.c)

187 struct ib_umad_packet *packet, bool is_recv_mad) in queue_packet() argument
197 for (packet->mad.hdr.id = 0; in queue_packet()
198 packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; in queue_packet()
199 packet->mad.hdr.id++) in queue_packet()
200 if (agent == __get_agent(file, packet->mad.hdr.id)) { in queue_packet()
201 list_add_tail(&packet->list, &file->recv_list); in queue_packet()
214 struct ib_umad_packet *packet) in dequeue_send() argument
217 list_del(&packet->list); in dequeue_send()
225 struct ib_umad_packet *packet = send_wc->send_buf->context[0]; in send_handler() local
227 dequeue_send(file, packet); in send_handler()
228 rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE); in send_handler()
229 ib_free_send_mad(packet->msg); in send_handler()
232 packet->length = IB_MGMT_MAD_HDR; in send_handler()
233 packet->mad.hdr.status = ETIMEDOUT; in send_handler()
234 if (!queue_packet(file, agent, packet, false)) in send_handler()
237 kfree(packet); in send_handler()
245 struct ib_umad_packet *packet; in recv_handler() local
250 packet = kzalloc(sizeof *packet, GFP_KERNEL); in recv_handler()
251 if (!packet) in recv_handler()
254 packet->length = mad_recv_wc->mad_len; in recv_handler()
255 packet->recv_wc = mad_recv_wc; in recv_handler()
257 packet->mad.hdr.status = 0; in recv_handler()
258 packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; in recv_handler()
259 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
265 packet->mad.hdr.lid = ib_lid_be16(0xFFFF & in recv_handler()
268 packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); in recv_handler()
269 packet->mad.hdr.sl = mad_recv_wc->wc->sl; in recv_handler()
270 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; in recv_handler()
271 packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; in recv_handler()
272 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); in recv_handler()
273 if (packet->mad.hdr.grh_present) { in recv_handler()
286 packet->mad.hdr.gid_index = grh->sgid_index; in recv_handler()
287 packet->mad.hdr.hop_limit = grh->hop_limit; in recv_handler()
288 packet->mad.hdr.traffic_class = grh->traffic_class; in recv_handler()
289 memcpy(packet->mad.hdr.gid, &grh->dgid, 16); in recv_handler()
290 packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label); in recv_handler()
294 if (queue_packet(file, agent, packet, true)) in recv_handler()
299 kfree(packet); in recv_handler()
305 struct ib_umad_packet *packet, size_t count) in copy_recv_mad() argument
311 recv_buf = &packet->recv_wc->recv_buf; in copy_recv_mad()
312 seg_size = packet->recv_wc->mad_seg_size; in copy_recv_mad()
315 if ((packet->length <= seg_size && in copy_recv_mad()
316 count < hdr_size(file) + packet->length) || in copy_recv_mad()
317 (packet->length > seg_size && in copy_recv_mad()
321 if (copy_to_user(buf, &packet->mad, hdr_size(file))) in copy_recv_mad()
325 seg_payload = min_t(int, packet->length, seg_size); in copy_recv_mad()
329 if (seg_payload < packet->length) { in copy_recv_mad()
334 if (count < hdr_size(file) + packet->length) { in copy_recv_mad()
344 for (left = packet->length - seg_payload, buf += seg_payload; in copy_recv_mad()
355 trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr); in copy_recv_mad()
357 return hdr_size(file) + packet->length; in copy_recv_mad()
361 struct ib_umad_packet *packet, size_t count) in copy_send_mad() argument
363 ssize_t size = hdr_size(file) + packet->length; in copy_send_mad()
368 if (copy_to_user(buf, &packet->mad, hdr_size(file))) in copy_send_mad()
373 if (copy_to_user(buf, packet->mad.data, packet->length)) in copy_send_mad()
376 trace_ib_umad_read_send(file, &packet->mad.hdr, in copy_send_mad()
377 (struct ib_mad_hdr *)&packet->mad.data); in copy_send_mad()
386 struct ib_umad_packet *packet; in ib_umad_read() local
417 packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); in ib_umad_read()
418 list_del(&packet->list); in ib_umad_read()
423 if (packet->recv_wc) in ib_umad_read()
424 ret = copy_recv_mad(file, buf, packet, count); in ib_umad_read()
426 ret = copy_send_mad(file, buf, packet, count); in ib_umad_read()
431 list_add(&packet->list, &file->recv_list); in ib_umad_read()
435 if (packet->recv_wc) in ib_umad_read()
436 ib_free_recv_mad(packet->recv_wc); in ib_umad_read()
437 kfree(packet); in ib_umad_read()
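
For context, the lines above are the read() side of the umad character device: ib_umad_read() dequeues a packet from recv_list and hands it to copy_recv_mad() or copy_send_mad(), which copy the struct ib_user_mad header plus the MAD payload to userspace. Below is a minimal userspace sketch of that consumer, assuming the uapi layout from <rdma/ib_user_mad.h>, a placeholder device node, and that an agent was already registered on the file descriptor; it is illustrative, not a drop-in tool.

/*
 * Illustrative sketch only: read one MAD from an ib_umad device node.
 * Assumes the uapi layout from <rdma/ib_user_mad.h> and a placeholder
 * device path; an agent must already be registered on the fd (via the
 * IB_USER_MAD_REGISTER_AGENT ioctl or libibumad), otherwise recv_list
 * stays empty and the read blocks.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <rdma/ib_user_mad.h>

int main(void)
{
	int fd = open("/dev/infiniband/umad0", O_RDWR);	/* placeholder node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * 256 bytes is enough for one unsegmented MAD; copy_recv_mad()
	 * returns -ENOSPC and requeues the packet when an RMPP reply
	 * needs a bigger buffer.
	 */
	size_t len = sizeof(struct ib_user_mad) + 256;
	struct ib_user_mad *umad = calloc(1, len);
	if (!umad) {
		close(fd);
		return 1;
	}

	ssize_t n = read(fd, umad, len);	/* served by copy_recv_mad()/copy_send_mad() */
	if (n < 0)
		perror("read");
	else
		printf("agent %u, status %u, %u bytes, slid 0x%x\n",
		       umad->hdr.id, umad->hdr.status, umad->hdr.length,
		       ntohs(umad->hdr.lid));

	free(umad);
	close(fd);
	return 0;
}
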
475 struct ib_umad_packet *packet) in is_duplicate() argument
480 hdr = (struct ib_mad_hdr *) packet->mad.data; in is_duplicate()
500 if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) in is_duplicate()
512 struct ib_umad_packet *packet; in ib_umad_write() local
523 packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL); in ib_umad_write()
524 if (!packet) in ib_umad_write()
527 if (copy_from_user(&packet->mad, buf, hdr_size(file))) { in ib_umad_write()
532 if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { in ib_umad_write()
539 if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) { in ib_umad_write()
546 trace_ib_umad_write(file, &packet->mad.hdr, in ib_umad_write()
547 (struct ib_mad_hdr *)&packet->mad.data); in ib_umad_write()
549 agent = __get_agent(file, packet->mad.hdr.id); in ib_umad_write()
558 rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); in ib_umad_write()
559 rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); in ib_umad_write()
560 rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits); in ib_umad_write()
562 if (packet->mad.hdr.grh_present) { in ib_umad_write()
564 be32_to_cpu(packet->mad.hdr.flow_label), in ib_umad_write()
565 packet->mad.hdr.gid_index, in ib_umad_write()
566 packet->mad.hdr.hop_limit, in ib_umad_write()
567 packet->mad.hdr.traffic_class); in ib_umad_write()
568 rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid); in ib_umad_write()
577 rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data; in ib_umad_write()
590 base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version; in ib_umad_write()
592 packet->msg = ib_create_send_mad(agent, in ib_umad_write()
593 be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
594 packet->mad.hdr.pkey_index, rmpp_active, in ib_umad_write()
597 if (IS_ERR(packet->msg)) { in ib_umad_write()
598 ret = PTR_ERR(packet->msg); in ib_umad_write()
602 packet->msg->ah = ah; in ib_umad_write()
603 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; in ib_umad_write()
604 packet->msg->retries = packet->mad.hdr.retries; in ib_umad_write()
605 packet->msg->context[0] = packet; in ib_umad_write()
608 memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); in ib_umad_write()
611 if (copy_from_user(packet->msg->mad + copy_offset, in ib_umad_write()
618 ret = copy_rmpp_mad(packet->msg, buf); in ib_umad_write()
628 if (!ib_response_mad(packet->msg->mad)) { in ib_umad_write()
629 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; in ib_umad_write()
639 list_add_tail(&packet->list, &file->send_list); in ib_umad_write()
643 ret = is_duplicate(file, packet); in ib_umad_write()
645 list_add_tail(&packet->list, &file->send_list); in ib_umad_write()
653 ret = ib_post_send_mad(packet->msg, NULL); in ib_umad_write()
661 dequeue_send(file, packet); in ib_umad_write()
663 ib_free_send_mad(packet->msg); in ib_umad_write()
669 kfree(packet); in ib_umad_write()
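
The write() path above is the mirror image: ib_umad_write() copies the struct ib_user_mad header and MAD data from userspace, looks up the agent by hdr.id, builds an address handle from the lid/sl/GRH fields, and posts the send. A hedged sketch of the userspace side follows; send_one_mad() is a hypothetical helper, and the agent id, destination LID, QPN, timeout and payload are placeholders a real sender obtains from agent registration and the fabric, not values defined by the driver.

/*
 * Illustrative sketch only: build a struct ib_user_mad and write() it.
 * send_one_mad() is a hypothetical helper; hdr.id must be an agent id
 * previously obtained with IB_USER_MAD_REGISTER_AGENT on this fd, and
 * the QPN, LID, timeout and payload below are placeholder values, not
 * anything mandated by the driver.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <rdma/ib_user_mad.h>

int send_one_mad(int fd, uint32_t agent_id, uint16_t dlid,
		 const void *mad, size_t mad_len)
{
	size_t len = sizeof(struct ib_user_mad) + mad_len;
	struct ib_user_mad *umad = calloc(1, len);

	if (!umad)
		return -1;

	umad->hdr.id         = agent_id;	/* looked up by __get_agent() */
	umad->hdr.qpn        = htonl(1);	/* GSI QP for GMPs; 0 for SMPs */
	umad->hdr.lid        = htons(dlid);	/* feeds rdma_ah_set_dlid() */
	umad->hdr.sl         = 0;
	umad->hdr.pkey_index = 0;
	umad->hdr.timeout_ms = 1000;		/* copied to packet->msg->timeout_ms */
	umad->hdr.retries    = 3;		/* copied to packet->msg->retries */
	memcpy(umad->data, mad, mad_len);	/* caller supplies a complete MAD */

	ssize_t n = write(fd, umad, len);	/* handled by ib_umad_write() */

	free(umad);
	return n == (ssize_t)len ? 0 : -1;
}
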
1043 struct ib_umad_packet *packet, *tmp; in ib_umad_close() local
1053 list_for_each_entry_safe(packet, tmp, &file->recv_list, list) { in ib_umad_close()
1054 if (packet->recv_wc) in ib_umad_close()
1055 ib_free_recv_mad(packet->recv_wc); in ib_umad_close()
1056 kfree(packet); in ib_umad_close()