/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"

static void oct_poll_req_completion(struct work_struct *work);

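/**
 * octeon_setup_response_list - initialize the response lists of a device
 * @oct: octeon device
 *
 * Initializes each response list and its lock, creates the "dma-comp"
 * workqueue, and queues the first run of the completion poller, which
 * then re-arms itself every 50 ms.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */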
int octeon_setup_response_list(struct octeon_device *oct)
{
	struct cavium_wq *cwq;
	int i;

	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
		INIT_LIST_HEAD(&oct->response_list[i].head);
		spin_lock_init(&oct->response_list[i].lock);
		atomic_set(&oct->response_list[i].pending_req_count, 0);
	}
	spin_lock_init(&oct->cmd_resp_wqlock);

	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
	if (!oct->dma_comp_wq.wq) {
		dev_err(&oct->pci_dev->dev, "failed to create dma-comp workqueue\n");
		return -ENOMEM;
	}

	cwq = &oct->dma_comp_wq;
	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
	cwq->wk.ctxptr = oct;
	oct->cmd_resp_state = OCT_DRV_ONLINE;
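	/* kick off the completion poller; it re-arms itself every 50 ms */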
	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));

	return 0;
}

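/**
 * octeon_delete_response_list - tear down the response-list poller
 * @oct: octeon device
 *
 * Cancels any pending completion-poll work and destroys the workqueue
 * created by octeon_setup_response_list().
 */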
void octeon_delete_response_list(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
	destroy_workqueue(oct->dma_comp_wq.wq);
}

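/**
 * lio_process_ordered_list - reap completed ordered soft commands
 * @octeon_dev: octeon device
 * @force_quit: when non-zero, treat every still-pending request as timed
 *              out instead of waiting for its deadline
 *
 * Pops completed (or expired) requests off the head of the ORDERED_SC
 * list and invokes their completion callbacks with the decoded firmware
 * status. At most MAX_ORD_REQS_TO_PROCESS requests are handled per call.
 *
 * Returns 1 if the list is empty, 0 otherwise.
 */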
int lio_process_ordered_list(struct octeon_device *octeon_dev,
			     u32 force_quit)
{
	struct octeon_response_list *ordered_sc_list;
	struct octeon_soft_command *sc;
	int request_complete = 0;
	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
	u32 status;
	u64 status64;

	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

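	/* Ordered requests complete in submission order, so only the entry
	 * at the head of the list ever needs to be examined.
	 */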
	do {
		spin_lock_bh(&ordered_sc_list->lock);

		if (list_empty(&ordered_sc_list->head)) {
			spin_unlock_bh(&ordered_sc_list->lock);
			return 1;
		}

		sc = list_first_entry(&ordered_sc_list->head,
				      struct octeon_soft_command, node);

		status = OCTEON_REQUEST_PENDING;

		/* Check whether Octeon has finished DMA'ing a response
		 * to the location rptr points to.
		 */
		status64 = *sc->status_word;

		if (status64 != COMPLETION_WORD_INIT) {
			/* This logic ensures that all 64 bits have been
			 * written:
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, swap the result from BE to host order
			 * 3. check byte 7 (now swapped to byte 0) for non-FF
			 * 4. if non-FF, use the low 32-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use the
			 *    status
			 */
			if ((status64 & 0xff) != 0xff) {
				octeon_swap_8B_data(&status64, 1);
				if ((status64 & 0xff) != 0xff) {
					/* retrieve 16-bit firmware status */
					status = (u32)(status64 & 0xffffULL);
					if (status) {
						status =
						  FIRMWARE_STATUS_CODE(status);
					} else {
						/* i.e. no error */
						status = OCTEON_REQUEST_DONE;
					}
				}
			}
		} else if (force_quit || (sc->timeout &&
			time_after(jiffies, (unsigned long)sc->timeout))) {
			dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
				__func__, (long)jiffies, (long)sc->timeout);
			status = OCTEON_REQUEST_TIMEOUT;
		}

		if (status != OCTEON_REQUEST_PENDING) {
			/* we have received a response or we have timed out;
			 * remove the node from the linked list
			 */
			list_del(&sc->node);
			atomic_dec(&octeon_dev->response_list
				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
			spin_unlock_bh(&ordered_sc_list->lock);

			if (sc->callback)
				sc->callback(octeon_dev, status,
					     sc->callback_arg);

			request_complete++;

		} else {
			/* no response yet */
			request_complete = 0;
			spin_unlock_bh(&ordered_sc_list->lock);
		}

		/* If we have already processed the maximum number of ordered
		 * requests for this invocation, quit and let the next run of
		 * the poll worker pick up the remainder; without this upper
		 * bound the loop could monopolize the CPU.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}

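/**
 * oct_poll_req_completion - delayed work that polls for request completion
 * @work: work_struct embedded in the driver's cavium_wk
 *
 * The cast below assumes that struct cavium_wk starts with its
 * delayed_work member (whose own first member is the work_struct);
 * container_of() would make that relationship explicit. The worker
 * re-queues itself every 50 ms.
 */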
static void oct_poll_req_completion(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	lio_process_ordered_list(oct, 0);
	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
}
169