// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <net/mana/gdma.h>
#include <net/mana/shm_channel.h>

#define PAGE_FRAME_L48_WIDTH_BYTES 6
#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF
#define PAGE_FRAME_H4_WIDTH_BITS 4
#define VECTOR_MASK 0xFFFF
#define SHMEM_VF_RESET_STATE ((u32)-1)

#define SMC_MSG_TYPE_ESTABLISH_HWC 1
#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0

#define SMC_MSG_TYPE_DESTROY_HWC 2
#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0

#define SMC_MSG_DIRECTION_REQUEST 0
#define SMC_MSG_DIRECTION_RESPONSE 1

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* Shared memory channel protocol header
 *
 * msg_type: set on the request and echoed in the response.
 * msg_version: a newer PF writes back the older version matching the request;
 *  an older PF acts on the latest version it knows and sets that version in
 *  the response (less than the request).
 * direction: 0 for request, VF->PF; 1 for response, PF->VF.
 * status: 0 on request,
 *  operation result on response (success = 0, failure = 1 or greater).
 * reset_vf: if set on either the establish or the destroy request, a Function
 *  Level Reset (FLR) is performed before/after the operation.
 * owner_is_pf: 1 indicates PF owned, 0 indicates VF owned.
 */
union smc_proto_hdr {
	u32 as_uint32;

	struct {
		u8 msg_type	: 3;
		u8 msg_version	: 3;
		u8 reserved_1	: 1;
		u8 direction	: 1;

		u8 status;

		u8 reserved_2;

		u8 reset_vf	: 1;
		u8 reserved_3	: 6;
		u8 owner_is_pf	: 1;
	};
}; /* HW DATA */
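
/* Illustrative example (on a little-endian host, where bitfields are
 * allocated from the least significant bit): an ESTABLISH_HWC request from
 * the VF, i.e. msg_type = 1, msg_version = 0, direction = request,
 * status = 0, reset_vf = 0, encodes as as_uint32 == 0x00000001.
 */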

#define SMC_APERTURE_BITS 256
#define SMC_BASIC_UNIT (sizeof(u32))
#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8))
#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1)
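
/* The aperture is 256 bits, i.e. eight 32-bit dwords, so SMC_LAST_DWORD is
 * index 7. That last dword holds the smc_proto_hdr; with the little-endian
 * bitfield layout in use, its owner_is_pf field lands in bit 31, which is
 * the ownership flag polled below.
 */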

static int mana_smc_poll_register(void __iomem *base, bool reset)
{
	void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT;
	u32 last_dword;
	int i;

	/* Poll the hardware for the ownership bit. This should be pretty fast,
	 * but let's do it in a loop just in case the hardware or the PF
	 * driver are temporarily busy.
	 */
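	/* Each iteration sleeps 1-2 ms, so the loop below bounds the wait to
	 * roughly 20-40 seconds before giving up.
	 */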
	for (i = 0; i < 20 * 1000; i++) {
		last_dword = readl(ptr);

		/* shmem reads as 0xFFFFFFFF in the reset case */
		if (reset && last_dword == SHMEM_VF_RESET_STATE)
			return 0;

		/* If bit_31 is set, the PF currently owns the SMC. */
		if (!(last_dword & BIT(31)))
			return 0;

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}

static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type,
				  u32 msg_version, bool reset_vf)
{
	void __iomem *base = sc->base;
	union smc_proto_hdr hdr;
	int err;

	/* Wait for PF to respond. */
	err = mana_smc_poll_register(base, reset_vf);
	if (err)
		return err;

	hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT);

	if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE)
		return 0;

	/* Validate protocol fields from the PF driver */
	if (hdr.msg_type != msg_type || hdr.msg_version > msg_version ||
	    hdr.direction != SMC_MSG_DIRECTION_RESPONSE) {
		dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n",
			hdr.as_uint32, msg_type, msg_version);
		return -EPROTO;
	}

	/* Validate the operation result */
	if (hdr.status != 0) {
		dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status);
		return -EPROTO;
	}

	return 0;
}

void mana_smc_init(struct shm_channel *sc, struct device *dev,
		   void __iomem *base)
{
	sc->dev = dev;
	sc->base = base;
}
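
/* Usage sketch (illustrative only; the caller-side names below are
 * hypothetical):
 *
 *	struct shm_channel sc;
 *
 *	mana_smc_init(&sc, &pdev->dev, shmem_base);
 *	err = mana_smc_setup_hwc(&sc, false, eq_dma, cq_dma, rq_dma, sq_dma,
 *				 eq_msix_index);
 *	...
 *	err = mana_smc_teardown_hwc(&sc, false);
 */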

int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
		       u64 cq_addr, u64 rq_addr, u64 sq_addr,
		       u32 eq_msix_index)
{
	union smc_proto_hdr *hdr;
	u16 all_addr_h4bits = 0;
	u16 frame_addr_seq = 0;
	u64 frame_addr = 0;
	u8 shm_buf[32];
	u64 *shmem;
	u32 *dword;
	u8 *ptr;
	int err;
	int i;

	/* Ensure VF already has possession of shared memory */
	err = mana_smc_poll_register(sc->base, false);
	if (err) {
		dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err);
		return err;
	}

	if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
	    !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
		return -EINVAL;

	if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
		return -EINVAL;

	/* Scheme for packing four addresses and extra info into 256 bits.
	 *
	 * Addresses must be page frame aligned, so only frame address bits
	 * are transferred.
	 *
	 * 52-bit frame addresses are split into the lower 48 bits and the
	 * upper 4 bits. The lower 48 bits of the 4 addresses are written
	 * sequentially from the start of the 256-bit shared memory region,
	 * followed by 16 bits containing the upper 4 bits of the 4 addresses
	 * in sequence.
	 *
	 * A 16-bit EQ vector number fills out the next-to-last 32-bit dword.
	 *
	 * The final 32-bit dword is used for protocol control information as
	 * defined in smc_proto_hdr.
	 */
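
	/* Resulting layout of the 32-byte shm_buf (byte offsets), matching
	 * the packing code below:
	 *    0-5   EQ frame address, low 48 bits
	 *    6-11  CQ frame address, low 48 bits
	 *   12-17  RQ frame address, low 48 bits
	 *   18-23  SQ frame address, low 48 bits
	 *   24-25  high 4 bits of each of the four addresses (4 x 4 bits)
	 *   26-27  EQ MSI-X vector number
	 *   28-31  protocol header (smc_proto_hdr)
	 */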

	memset(shm_buf, 0, sizeof(shm_buf));
	ptr = shm_buf;

	/* EQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = MANA_PFN(eq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* CQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = MANA_PFN(cq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* RQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = MANA_PFN(rq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* SQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = MANA_PFN(sq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* High 4 bits of the four frame addresses */
	*((u16 *)ptr) = all_addr_h4bits;
	ptr += sizeof(u16);

	/* EQ MSIX vector number */
	*((u16 *)ptr) = (u16)eq_msix_index;
	ptr += sizeof(u16);

	/* 32-bit protocol header in final dword */
	*((u32 *)ptr) = 0;

	hdr = (union smc_proto_hdr *)ptr;
	hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC;
	hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION;
	hdr->direction = SMC_MSG_DIRECTION_REQUEST;
	hdr->reset_vf = reset_vf;

	/* Write the 256-bit message buffer to shared memory (the final
	 * 32-bit write triggers HW to set the possession bit to PF).
	 */
	dword = (u32 *)shm_buf;
	for (i = 0; i < SMC_APERTURE_DWORDS; i++)
		writel(*dword++, sc->base + i * SMC_BASIC_UNIT);

	/* Read shmem response (polling for VF possession) and validate.
	 * For setup, waiting for the response on shared memory is not
	 * strictly necessary, since the wait occurs later for results to
	 * appear in EQEs.
	 */
	err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC,
				     SMC_MSG_TYPE_ESTABLISH_HWC_VERSION,
				     reset_vf);
	if (err) {
		dev_err(sc->dev, "Error when setting up HWC: %d\n", err);
		return err;
	}

	return 0;
}

int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf)
{
	union smc_proto_hdr hdr = {};
	int err;

	/* Ensure the VF already has possession of shared memory */
	err = mana_smc_poll_register(sc->base, false);
	if (err) {
		dev_err(sc->dev, "Timeout when tearing down HWC\n");
		return err;
	}

	/* Set up protocol header for HWC destroy message */
	hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC;
	hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION;
	hdr.direction = SMC_MSG_DIRECTION_REQUEST;
	hdr.reset_vf = reset_vf;
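
	/* The destroy request carries no payload beyond the protocol header,
	 * so only the final dword of the aperture needs to be written.
	 */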

	/* Write message in high 32 bits of 256-bit shared memory, causing HW
	 * to set possession bit to PF.
	 */
	writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT);

	/* Read shmem response (polling for VF possession) and validate.
	 * For teardown, waiting for response is required to ensure hardware
	 * invalidates MST entries before software frees memory.
	 */
	err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC,
				     SMC_MSG_TYPE_DESTROY_HWC_VERSION,
				     reset_vf);
	if (err) {
		dev_err(sc->dev, "Error when tearing down HWC: %d\n", err);
		return err;
	}

	return 0;
}