Lines matching full:cb (identifier search hits in the Linux MEI driver's interrupt handling code)
31 struct mei_cl_cb *cb, *next; in mei_irq_compl_handler() local
34 list_for_each_entry_safe(cb, next, cmpl_list, list) { in mei_irq_compl_handler()
35 cl = cb->cl; in mei_irq_compl_handler()
36 list_del_init(&cb->list); in mei_irq_compl_handler()
39 mei_cl_complete(cl, cb); in mei_irq_compl_handler()
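The mei_irq_compl_handler() lines above show the driver's completion pass: walk the completion list with the _safe iterator (entries are unlinked mid-walk), detach each callback block, and hand it to its client. A minimal sketch of that pattern, assuming the kernel list API from <linux/list.h>; the struct layouts are reduced to the fields visible in the fragment and are illustrative, not the driver's real definitions:

	#include <linux/list.h>

	struct mei_cl;                          /* client handle (opaque here) */

	struct mei_cl_cb {
		struct list_head list;          /* links the cb into a queue */
		struct mei_cl *cl;              /* owning client */
	};

	void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);

	static void compl_handler_sketch(struct list_head *cmpl_list)
	{
		struct mei_cl_cb *cb, *next;
		struct mei_cl *cl;

		/*
		 * The _safe variant caches the next entry up front, so
		 * list_del_init() on the current cb cannot break the walk.
		 */
		list_for_each_entry_safe(cb, next, cmpl_list, list) {
			cl = cb->cl;
			list_del_init(&cb->list);   /* unlink before completing */
			mei_cl_complete(cl, cb);    /* hand the cb back to the client */
		}
	}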
99 struct mei_cl_cb *cb; in mei_cl_irq_read_msg() local
115 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); in mei_cl_irq_read_msg()
116 if (!cb) { in mei_cl_irq_read_msg()
118 cl_err(dev, cl, "pending read cb not found\n"); in mei_cl_irq_read_msg()
121 cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp); in mei_cl_irq_read_msg()
122 if (!cb) in mei_cl_irq_read_msg()
124 list_add_tail(&cb->list, &cl->rd_pending); in mei_cl_irq_read_msg()
136 cb->ext_hdr = kzalloc(sizeof(*gsc_f2h), GFP_KERNEL); in mei_cl_irq_read_msg()
137 if (!cb->ext_hdr) { in mei_cl_irq_read_msg()
138 cb->status = -ENOMEM; in mei_cl_irq_read_msg()
146 cb->status = -EPROTO; in mei_cl_irq_read_msg()
155 cb->status = -EPROTO; in mei_cl_irq_read_msg()
162 if (cb->vtag && cb->vtag != vtag_hdr->vtag) { in mei_cl_irq_read_msg()
164 cb->vtag, vtag_hdr->vtag); in mei_cl_irq_read_msg()
165 cb->status = -EPROTO; in mei_cl_irq_read_msg()
168 cb->vtag = vtag_hdr->vtag; in mei_cl_irq_read_msg()
176 cb->status = -EPROTO; in mei_cl_irq_read_msg()
181 cl_err(dev, cl, "no data allowed in cb with gsc\n"); in mei_cl_irq_read_msg()
182 cb->status = -EPROTO; in mei_cl_irq_read_msg()
187 cb->status = -EPROTO; in mei_cl_irq_read_msg()
190 memcpy(cb->ext_hdr, gsc_f2h, ext_hdr_len); in mei_cl_irq_read_msg()
195 cb->status = -ENODEV; in mei_cl_irq_read_msg()
202 buf_sz = length + cb->buf_idx; in mei_cl_irq_read_msg()
204 if (buf_sz < cb->buf_idx) { in mei_cl_irq_read_msg()
206 length, cb->buf_idx); in mei_cl_irq_read_msg()
207 cb->status = -EMSGSIZE; in mei_cl_irq_read_msg()
211 if (cb->buf.size < buf_sz) { in mei_cl_irq_read_msg()
213 cb->buf.size, length, cb->buf_idx); in mei_cl_irq_read_msg()
214 cb->status = -EMSGSIZE; in mei_cl_irq_read_msg()
219 mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); in mei_cl_irq_read_msg()
221 mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0); in mei_cl_irq_read_msg()
223 mei_read_slots(dev, cb->buf.data + cb->buf_idx, length); in mei_cl_irq_read_msg()
226 cb->buf_idx += length; in mei_cl_irq_read_msg()
229 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); in mei_cl_irq_read_msg()
230 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read_msg()
239 if (cb) in mei_cl_irq_read_msg()
240 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read_msg()
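The mei_cl_irq_read_msg() lines show the receive-side bookkeeping: incoming payload is appended at cb->buf_idx, an overflowing or oversized message is rejected with -EMSGSIZE, and every error path records cb->status and parks the cb on the completion list. A condensed sketch of that flow, with trimmed struct definitions and a hypothetical mei_read_payload() standing in for the mei_read_slots()/mei_dma_ring_read() calls seen above:

	#include <linux/list.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	struct mei_buf {
		unsigned char *data;
		size_t size;
	};

	struct mei_cl_cb {
		struct list_head list;
		struct mei_buf buf;
		size_t buf_idx;                 /* bytes accumulated so far */
		int status;                     /* 0, or first error seen */
	};

	/* hypothetical helper standing in for mei_read_slots()/mei_dma_ring_read() */
	void mei_read_payload(unsigned char *dst, size_t length);

	static void irq_read_msg_sketch(struct mei_cl_cb *cb, size_t length,
					struct list_head *cmpl_list)
	{
		size_t buf_sz = length + cb->buf_idx;

		if (buf_sz < cb->buf_idx) {
			cb->status = -EMSGSIZE; /* unsigned addition wrapped around */
			goto discard;
		}

		if (cb->buf.size < buf_sz) {
			cb->status = -EMSGSIZE; /* message larger than the read buffer */
			goto discard;
		}

		mei_read_payload(cb->buf.data + cb->buf_idx, length);
		cb->buf_idx += length;
		return;

	discard:
		/* error path: complete the cb so the waiter sees cb->status */
		list_move_tail(&cb->list, cmpl_list);
	}

Completing the cb even on error is what lets the waiting reader observe cb->status instead of blocking on a read that will never finish.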
249 * @cb: callback block.
254 static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, in mei_cl_irq_disconnect_rsp() argument
271 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_disconnect_rsp()
281 * @cb: callback block.
286 static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, in mei_cl_irq_read() argument
308 cb->buf_idx = 0; in mei_cl_irq_read()
309 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read()
316 list_move_tail(&cb->list, &cl->rd_pending); in mei_cl_irq_read()
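The mei_cl_irq_read() lines suggest a two-way outcome: if arming the read fails, the cb is completed at once with buf_idx reset; otherwise it is moved onto the client's rd_pending list to wait for data. A sketch of that control flow, where arm_read() is a hypothetical placeholder for whatever step the driver performs to request data from the device:

	#include <linux/list.h>
	#include <linux/types.h>

	struct mei_cl_cb {
		struct list_head list;
		size_t buf_idx;
	};

	struct mei_cl {
		struct list_head rd_pending;    /* reads armed, waiting for data */
	};

	/* hypothetical: the step that asks the device to send data for this client */
	int arm_read(struct mei_cl *cl);

	static int irq_read_sketch(struct mei_cl *cl, struct mei_cl_cb *cb,
				   struct list_head *cmpl_list)
	{
		int ret = arm_read(cl);

		if (ret) {
			/* failure: nothing was read, complete the cb with no data */
			cb->buf_idx = 0;
			list_move_tail(&cb->list, cmpl_list);
			return ret;
		}

		/* success: queue the cb until the receive path fills it */
		list_move_tail(&cb->list, &cl->rd_pending);
		return 0;
	}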
512 struct mei_cl_cb *cb, *next; in mei_irq_write_handler() local
527 /* complete all waiting for write CB */ in mei_irq_write_handler()
528 dev_dbg(dev->dev, "complete all waiting for write cb.\n"); in mei_irq_write_handler()
530 list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) { in mei_irq_write_handler()
531 cl = cb->cl; in mei_irq_write_handler()
536 list_move_tail(&cb->list, cmpl_list); in mei_irq_write_handler()
539 /* complete control write list CB */ in mei_irq_write_handler()
540 dev_dbg(dev->dev, "complete control write list cb.\n"); in mei_irq_write_handler()
541 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) { in mei_irq_write_handler()
542 cl = cb->cl; in mei_irq_write_handler()
543 switch (cb->fop_type) { in mei_irq_write_handler()
546 ret = mei_cl_irq_disconnect(cl, cb, cmpl_list); in mei_irq_write_handler()
553 ret = mei_cl_irq_read(cl, cb, cmpl_list); in mei_irq_write_handler()
560 ret = mei_cl_irq_connect(cl, cb, cmpl_list); in mei_irq_write_handler()
567 ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); in mei_irq_write_handler()
574 ret = mei_cl_irq_notify(cl, cb, cmpl_list); in mei_irq_write_handler()
579 ret = mei_cl_irq_dma_map(cl, cb, cmpl_list); in mei_irq_write_handler()
584 ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list); in mei_irq_write_handler()
593 /* complete write list CB */ in mei_irq_write_handler()
594 dev_dbg(dev->dev, "complete write list cb.\n"); in mei_irq_write_handler()
595 list_for_each_entry_safe(cb, next, &dev->write_list, list) { in mei_irq_write_handler()
596 cl = cb->cl; in mei_irq_write_handler()
597 ret = mei_cl_irq_write(cl, cb, cmpl_list); in mei_irq_write_handler()
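The mei_irq_write_handler() lines show the control-write queue being dispatched on cb->fop_type, with one mei_cl_irq_*() helper per operation. A condensed model of that dispatch, trimmed to a representative subset of operation types; the early return on error models the handler giving up for this interrupt (for instance when the transmit buffer has no free slots) so the remaining cbs stay queued:

	#include <linux/list.h>
	#include <linux/errno.h>

	enum mei_cb_fop_type {
		MEI_FOP_READ,
		MEI_FOP_CONNECT,
		MEI_FOP_DISCONNECT,
	};

	struct mei_cl;

	struct mei_cl_cb {
		struct list_head list;
		struct mei_cl *cl;
		enum mei_cb_fop_type fop_type;  /* which operation this cb carries */
	};

	int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			    struct list_head *cmpl_list);
	int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
			       struct list_head *cmpl_list);
	int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
				  struct list_head *cmpl_list);

	static int ctrl_wr_dispatch_sketch(struct list_head *ctrl_wr_list,
					   struct list_head *cmpl_list)
	{
		struct mei_cl_cb *cb, *next;
		int ret;

		list_for_each_entry_safe(cb, next, ctrl_wr_list, list) {
			switch (cb->fop_type) {
			case MEI_FOP_READ:
				ret = mei_cl_irq_read(cb->cl, cb, cmpl_list);
				break;
			case MEI_FOP_CONNECT:
				ret = mei_cl_irq_connect(cb->cl, cb, cmpl_list);
				break;
			case MEI_FOP_DISCONNECT:
				ret = mei_cl_irq_disconnect(cb->cl, cb, cmpl_list);
				break;
			default:
				ret = -EOPNOTSUPP;
				break;
			}
			if (ret)
				return ret;     /* stop now; retry the rest on a later pass */
		}
		return 0;
	}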