xref: /openbmc/linux/drivers/scsi/fnic/vnic_wq_copy.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1*e6550b3eSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
25df6d737SAbhijeet Joglekar /*
35df6d737SAbhijeet Joglekar  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
45df6d737SAbhijeet Joglekar  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
55df6d737SAbhijeet Joglekar  */
65df6d737SAbhijeet Joglekar #ifndef _VNIC_WQ_COPY_H_
75df6d737SAbhijeet Joglekar #define _VNIC_WQ_COPY_H_
85df6d737SAbhijeet Joglekar 
95df6d737SAbhijeet Joglekar #include <linux/pci.h>
105df6d737SAbhijeet Joglekar #include "vnic_wq.h"
115df6d737SAbhijeet Joglekar #include "fcpio.h"
125df6d737SAbhijeet Joglekar 
135df6d737SAbhijeet Joglekar #define	VNIC_WQ_COPY_MAX 1
145df6d737SAbhijeet Joglekar 
/*
 * State for one copy (FCP I/O) work queue.
 *
 * The queue is a ring of fcpio_host_req descriptors shared with the
 * adapter: software fills the entry at to_use_index and posts that
 * index to the memory-mapped control registers; completed entries are
 * reclaimed starting at to_clean_index.
 */
struct vnic_wq_copy {
	unsigned int index;			/* queue number within the device */
	struct vnic_dev *vdev;			/* owning vNIC device */
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped queue control regs */
	struct vnic_dev_ring ring;		/* descriptor ring backing store */
	unsigned to_use_index;			/* next slot software will fill/post */
	unsigned to_clean_index;		/* next slot to reclaim on completion */
};
235df6d737SAbhijeet Joglekar 
vnic_wq_copy_desc_avail(struct vnic_wq_copy * wq)245df6d737SAbhijeet Joglekar static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
255df6d737SAbhijeet Joglekar {
265df6d737SAbhijeet Joglekar 	return wq->ring.desc_avail;
275df6d737SAbhijeet Joglekar }
285df6d737SAbhijeet Joglekar 
vnic_wq_copy_desc_in_use(struct vnic_wq_copy * wq)295df6d737SAbhijeet Joglekar static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
305df6d737SAbhijeet Joglekar {
315df6d737SAbhijeet Joglekar 	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
325df6d737SAbhijeet Joglekar }
335df6d737SAbhijeet Joglekar 
vnic_wq_copy_next_desc(struct vnic_wq_copy * wq)345df6d737SAbhijeet Joglekar static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
355df6d737SAbhijeet Joglekar {
365df6d737SAbhijeet Joglekar 	struct fcpio_host_req *desc = wq->ring.descs;
375df6d737SAbhijeet Joglekar 	return &desc[wq->to_use_index];
385df6d737SAbhijeet Joglekar }
395df6d737SAbhijeet Joglekar 
vnic_wq_copy_post(struct vnic_wq_copy * wq)405df6d737SAbhijeet Joglekar static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
415df6d737SAbhijeet Joglekar {
425df6d737SAbhijeet Joglekar 
435df6d737SAbhijeet Joglekar 	((wq->to_use_index + 1) == wq->ring.desc_count) ?
445df6d737SAbhijeet Joglekar 		(wq->to_use_index = 0) : (wq->to_use_index++);
455df6d737SAbhijeet Joglekar 	wq->ring.desc_avail--;
465df6d737SAbhijeet Joglekar 
475df6d737SAbhijeet Joglekar 	/* Adding write memory barrier prevents compiler and/or CPU
485df6d737SAbhijeet Joglekar 	 * reordering, thus avoiding descriptor posting before
495df6d737SAbhijeet Joglekar 	 * descriptor is initialized. Otherwise, hardware can read
505df6d737SAbhijeet Joglekar 	 * stale descriptor fields.
515df6d737SAbhijeet Joglekar 	 */
525df6d737SAbhijeet Joglekar 	wmb();
535df6d737SAbhijeet Joglekar 
545df6d737SAbhijeet Joglekar 	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
555df6d737SAbhijeet Joglekar }
565df6d737SAbhijeet Joglekar 
/*
 * Reclaim every descriptor from to_clean_index up to and including
 * @index, accounting for ring wrap-around.  On return to_clean_index
 * sits just past @index.
 */
static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
	unsigned int count = wq->ring.desc_count;
	unsigned int reclaimed;

	if (index >= wq->to_clean_index)
		reclaimed = index - wq->to_clean_index + 1;
	else
		/* completion wrapped past the end of the ring */
		reclaimed = count + index - wq->to_clean_index + 1;

	wq->to_clean_index = (index + 1) % count;
	wq->ring.desc_avail += reclaimed;
}
705df6d737SAbhijeet Joglekar 
/*
 * Service completed descriptors: invoke @q_service (if non-NULL) on each
 * ring entry from to_clean_index through @completed_index inclusive, and
 * reclaim the slots.
 *
 * A @completed_index of (u16)-1 means "clean everything": the walk stops
 * once to_clean_index catches up with to_use_index.
 */
static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
	u16 completed_index,
	void (*q_service)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	struct fcpio_host_req *descs = wq->ring.descs;
	unsigned int serviced;

	do {
		serviced = wq->to_clean_index;

		if (q_service)
			q_service(wq, &descs[serviced]);

		wq->ring.desc_avail++;

		/* step to the next unprocessed entry, wrapping at the
		 * end of the ring, so the next call starts fresh
		 */
		wq->to_clean_index = (serviced + 1) % wq->ring.desc_count;

		if (serviced == completed_index)
			break;

		/* in "clean all" mode, stop once the ring is drained */
	} while (!(completed_index == (u16)-1 &&
		   wq->to_clean_index == wq->to_use_index));
}
1035df6d737SAbhijeet Joglekar 
/* Queue lifecycle — implemented in vnic_wq_copy.c. */

/* Start descriptor fetch on the queue. */
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
/* Stop the queue; returns 0 on success, non-zero on failure
 * (NOTE(review): exact error semantics live in vnic_wq_copy.c — confirm).
 */
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
/* Release the descriptor ring resources. */
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
/* Allocate a ring of @desc_count descriptors of @desc_size bytes each. */
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
/* Program the queue control registers (completion queue, error interrupt). */
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
/* Drain the ring, invoking @q_clean on each outstanding descriptor. */
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc));
1155df6d737SAbhijeet Joglekar 
1165df6d737SAbhijeet Joglekar #endif /* _VNIC_WQ_COPY_H_ */
117