Lines matching refs: msg

564 static bool flexrm_spu_sanity_check(struct brcm_message *msg)  in flexrm_spu_sanity_check()  argument
568 if (!msg->spu.src || !msg->spu.dst) in flexrm_spu_sanity_check()
570 for (sg = msg->spu.src; sg; sg = sg_next(sg)) { in flexrm_spu_sanity_check()
579 for (sg = msg->spu.dst; sg; sg = sg_next(sg)) { in flexrm_spu_sanity_check()
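
These references appear to come from the Broadcom FlexRM mailbox driver (drivers/mailbox/bcm-flexrm-mailbox.c). A minimal sketch of flexrm_spu_sanity_check() as implied by the lines above: the message is rejected if either scatterlist is missing, then both lists are walked entry by entry. Only the loop headers are referenced here, so the per-entry checks and the return statements below are assumptions.

static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	/* Both source and destination scatterlists must be present */
	if (!msg->spu.src || !msg->spu.dst)
		return false;

	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		/* each source entry's length is validated against the
		 * hardware descriptor limits (details not in the listing) */
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		/* likewise for each destination entry */
	}

	return true;
}
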
592 static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg) in flexrm_spu_estimate_nonheader_desc_count() argument
596 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst; in flexrm_spu_estimate_nonheader_desc_count()
619 static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg) in flexrm_spu_dma_map() argument
623 rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src), in flexrm_spu_dma_map()
628 rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), in flexrm_spu_dma_map()
631 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), in flexrm_spu_dma_map()
639 static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg) in flexrm_spu_dma_unmap() argument
641 dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), in flexrm_spu_dma_unmap()
643 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), in flexrm_spu_dma_unmap()
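
The SPU DMA helpers are almost fully visible in the listing. A sketch, assuming the usual dma_map_sg() convention (0 mapped entries means failure); the DMA directions and the -EIO error code are assumptions, since the continuation lines are not referenced. A failure on the destination mapping rolls back the source mapping, and the unmap helper mirrors the mappings in reverse order.

static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (!rc)
		return -EIO;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (!rc) {
		/* Roll back the source mapping if the destination fails */
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return -EIO;
	}

	return 0;
}

static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}
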
647 static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt, in flexrm_spu_write_descs() argument
655 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst; in flexrm_spu_write_descs()
705 static bool flexrm_sba_sanity_check(struct brcm_message *msg) in flexrm_sba_sanity_check() argument
709 if (!msg->sba.cmds || !msg->sba.cmds_count) in flexrm_sba_sanity_check()
712 for (i = 0; i < msg->sba.cmds_count; i++) { in flexrm_sba_sanity_check()
713 if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) || in flexrm_sba_sanity_check()
714 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) && in flexrm_sba_sanity_check()
715 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)) in flexrm_sba_sanity_check()
717 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) && in flexrm_sba_sanity_check()
718 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK)) in flexrm_sba_sanity_check()
720 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) && in flexrm_sba_sanity_check()
721 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK)) in flexrm_sba_sanity_check()
723 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) && in flexrm_sba_sanity_check()
724 (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK)) in flexrm_sba_sanity_check()
726 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) && in flexrm_sba_sanity_check()
727 (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK)) in flexrm_sba_sanity_check()
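
The SBA sanity-check conditions are listed nearly in full; only the loop variable, the early returns and the final return are filled in below as assumptions. The checks reject commands that combine a TYPE_B/TYPE_C flag with a direct output, and commands whose data or response lengths exceed the source/destination descriptor length fields.

static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
	u32 i;

	if (!msg->sba.cmds || !msg->sba.cmds_count)
		return false;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
			return false;
	}

	return true;
}
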
734 static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg) in flexrm_sba_estimate_nonheader_desc_count() argument
739 for (i = 0; i < msg->sba.cmds_count; i++) { in flexrm_sba_estimate_nonheader_desc_count()
742 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) || in flexrm_sba_estimate_nonheader_desc_count()
743 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) in flexrm_sba_estimate_nonheader_desc_count()
746 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) in flexrm_sba_estimate_nonheader_desc_count()
749 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) in flexrm_sba_estimate_nonheader_desc_count()
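
A sketch of the SBA descriptor-count estimate implied by the flag tests above: one descriptor per command, plus one extra source descriptor for TYPE_B/TYPE_C commands and one extra destination descriptor each for a response and for an output. The counter variable and the exact increments are assumptions; the flag tests come from the listing.

static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 i, cnt = 0;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		cnt++;		/* command descriptor */

		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
			cnt++;	/* extra source descriptor */

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
			cnt++;	/* response destination descriptor */

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
			cnt++;	/* output destination descriptor */
	}

	return cnt;
}
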
756 static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt, in flexrm_sba_write_descs() argument
766 for (i = 0; i < msg->sba.cmds_count; i++) { in flexrm_sba_write_descs()
767 c = &msg->sba.cmds[i]; in flexrm_sba_write_descs()
834 static bool flexrm_sanity_check(struct brcm_message *msg) in flexrm_sanity_check() argument
836 if (!msg) in flexrm_sanity_check()
839 switch (msg->type) { in flexrm_sanity_check()
841 return flexrm_spu_sanity_check(msg); in flexrm_sanity_check()
843 return flexrm_sba_sanity_check(msg); in flexrm_sanity_check()
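
The top-level helpers (sanity check, descriptor-count estimate, DMA map/unmap, descriptor write) all dispatch on the message type the same way. A sketch of the sanity-check dispatcher, assuming the BRCM_MESSAGE_SPU/BRCM_MESSAGE_SBA enumerators from include/linux/mailbox/brcm-message.h and a reject-by-default fallback:

static bool flexrm_sanity_check(struct brcm_message *msg)
{
	if (!msg)
		return false;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_sanity_check(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_sanity_check(msg);
	default:
		return false;	/* assumption: unknown types are rejected */
	}
}
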
849 static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg) in flexrm_estimate_nonheader_desc_count() argument
851 if (!msg) in flexrm_estimate_nonheader_desc_count()
854 switch (msg->type) { in flexrm_estimate_nonheader_desc_count()
856 return flexrm_spu_estimate_nonheader_desc_count(msg); in flexrm_estimate_nonheader_desc_count()
858 return flexrm_sba_estimate_nonheader_desc_count(msg); in flexrm_estimate_nonheader_desc_count()
864 static int flexrm_dma_map(struct device *dev, struct brcm_message *msg) in flexrm_dma_map() argument
866 if (!dev || !msg) in flexrm_dma_map()
869 switch (msg->type) { in flexrm_dma_map()
871 return flexrm_spu_dma_map(dev, msg); in flexrm_dma_map()
879 static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg) in flexrm_dma_unmap() argument
881 if (!dev || !msg) in flexrm_dma_unmap()
884 switch (msg->type) { in flexrm_dma_unmap()
886 flexrm_spu_dma_unmap(dev, msg); in flexrm_dma_unmap()
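
Only the SPU case of the DMA map/unmap dispatchers shows up in the listing, which suggests SBA messages need no per-message DMA mapping (their buffers are expected to be DMA-ready). A sketch under that assumption; the -EINVAL error code for bad arguments is illustrative, not confirmed by the listing.

static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return -EINVAL;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_dma_map(dev, msg);
	default:
		break;		/* assumption: other types need no mapping */
	}

	return 0;
}

static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		flexrm_spu_dma_unmap(dev, msg);
		break;
	default:
		break;
	}
}
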
893 static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt, in flexrm_write_descs() argument
897 if (!msg || !desc_ptr || !start_desc || !end_desc) in flexrm_write_descs()
903 switch (msg->type) { in flexrm_write_descs()
905 return flexrm_spu_write_descs(msg, nhcnt, reqid, in flexrm_write_descs()
909 return flexrm_sba_write_descs(msg, nhcnt, reqid, in flexrm_write_descs()
977 struct brcm_message *msg) in flexrm_new_request() argument
987 if (!flexrm_sanity_check(msg)) in flexrm_new_request()
989 msg->error = 0; in flexrm_new_request()
998 ring->requests[reqid] = msg; in flexrm_new_request()
1001 ret = flexrm_dma_map(ring->mbox->dev, msg); in flexrm_new_request()
1021 nhcnt = flexrm_estimate_nonheader_desc_count(msg); in flexrm_new_request()
1042 next = flexrm_write_descs(msg, nhcnt, reqid, in flexrm_new_request()
1060 msg->error = ret; in flexrm_new_request()
1064 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_new_request()
1079 struct brcm_message *msg = NULL; in flexrm_process_completions() local
1123 msg = ring->requests[reqid]; in flexrm_process_completions()
1124 if (!msg) { in flexrm_process_completions()
1138 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_process_completions()
1141 msg->error = err; in flexrm_process_completions()
1142 mbox_chan_received_data(chan, msg); in flexrm_process_completions()
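
Inside flexrm_process_completions(), each completion descriptor is decoded to a request id and an error status; the per-request handling implied by the referenced lines is sketched below as a hypothetical helper (in the real code this sits inline in the completion loop, and the exact slot-release bookkeeping is not visible in the listing).

/* Hypothetical helper: finish one completed request; reqid and err are
 * decoded from the completion descriptor by the caller. */
static void flexrm_complete_one(struct flexrm_ring *ring,
				struct mbox_chan *chan,
				u32 reqid, int err)
{
	struct brcm_message *msg;

	msg = ring->requests[reqid];
	if (!msg)
		return;		/* spurious completion, nothing posted here */

	/* Assumption: the slot is released before the client is notified */
	ring->requests[reqid] = NULL;

	/* Undo DMA mappings of the completed request */
	flexrm_dma_unmap(ring->mbox->dev, msg);

	/* Hand the message back to the mailbox client with its status */
	msg->error = err;
	mbox_chan_received_data(chan, msg);
}
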
1197 struct brcm_message *msg = data; in flexrm_send_data() local
1199 if (msg->type == BRCM_MESSAGE_BATCH) { in flexrm_send_data()
1200 for (i = msg->batch.msgs_queued; in flexrm_send_data()
1201 i < msg->batch.msgs_count; i++) { in flexrm_send_data()
1202 rc = flexrm_new_request(ring, msg, in flexrm_send_data()
1203 &msg->batch.msgs[i]); in flexrm_send_data()
1205 msg->error = rc; in flexrm_send_data()
1208 msg->batch.msgs_queued++; in flexrm_send_data()
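
The send_data hook queues batch messages starting at batch.msgs_queued, so a partially queued batch can be resumed from where it stopped. A sketch based on the referenced lines; the early return on error and the non-batch fallthrough (passing NULL as the batch message) are assumptions.

static int flexrm_send_data(struct mbox_chan *chan, void *data)
{
	int i, rc;
	struct flexrm_ring *ring = chan->con_priv;
	struct brcm_message *msg = data;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		/* Resume queueing from where a previous attempt stopped */
		for (i = msg->batch.msgs_queued;
		     i < msg->batch.msgs_count; i++) {
			rc = flexrm_new_request(ring, msg,
						&msg->batch.msgs[i]);
			if (rc) {
				msg->error = rc;
				return rc;
			}
			msg->batch.msgs_queued++;
		}
		return 0;
	}

	return flexrm_new_request(ring, NULL, data);
}
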
1363 struct brcm_message *msg; in flexrm_shutdown() local
1398 msg = ring->requests[reqid]; in flexrm_shutdown()
1399 if (!msg) in flexrm_shutdown()
1406 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_shutdown()
1409 msg->error = -EIO; in flexrm_shutdown()
1410 mbox_chan_received_data(chan, msg); in flexrm_shutdown()
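
On shutdown every still-pending request is completed with -EIO so clients do not hang. A sketch of the cleanup loop implied by the referenced lines; the loop bound (RING_MAX_REQ_COUNT in the driver) and the slot-release step are assumptions.

	/* Abort and give negative completions to all pending requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release the request slot (bookkeeping details elided) */
		ring->requests[reqid] = NULL;

		/* Undo DMA mappings of this request */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give a negative completion to the mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}
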
1473 static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg) in flexrm_mbox_msi_write() argument
1480 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS); in flexrm_mbox_msi_write()
1481 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS); in flexrm_mbox_msi_write()
1482 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE); in flexrm_mbox_msi_write()
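
The MSI write callback programs the per-ring MSI address/data registers so the ring can raise its own interrupt. A sketch; how the ring is looked up from the msi_desc is an assumption, since it is not visible in the listing.

static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct flexrm_mbox *mbox = dev_get_drvdata(dev);
	/* assumption: ring selected by the MSI descriptor's index */
	struct flexrm_ring *ring = &mbox->rings[desc->msi_index];

	/* Program the per-ring MSI address and data registers */
	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}
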