// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <net/mana/shm_channel.h>

#define PAGE_FRAME_L48_WIDTH_BYTES 6
#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF
#define PAGE_FRAME_H4_WIDTH_BITS 4
#define VECTOR_MASK 0xFFFF
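/* Value read back from shared memory (all ones) while the VF is in reset/FLR */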
#define SHMEM_VF_RESET_STATE ((u32)-1)

#define SMC_MSG_TYPE_ESTABLISH_HWC 1
#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0

#define SMC_MSG_TYPE_DESTROY_HWC 2
#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0

#define SMC_MSG_DIRECTION_REQUEST 0
#define SMC_MSG_DIRECTION_RESPONSE 1

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* Shared memory channel protocol header
 *
 * msg_type: set on request and response; the response matches the request.
 * msg_version: a newer PF writes back a response at the (older) requested
 *  version; an older PF acts on the latest version it knows and sets that
 *  version in the result (less than the requested version).
 * direction: 0 for request, VF->PF; 1 for response, PF->VF.
 * status: 0 on request; operation result on response (0 = success,
 *  1 or greater = failure).
 * reset_vf: if set on either an establish or a destroy request, indicates
 *  that an FLR is performed before/after the operation.
 * owner_is_pf: 1 indicates PF owned, 0 indicates VF owned.
 */
union smc_proto_hdr {
	u32 as_uint32;

	struct {
		u8 msg_type	: 3;
		u8 msg_version	: 3;
		u8 reserved_1	: 1;
		u8 direction	: 1;

		u8 status;

		u8 reserved_2;

		u8 reset_vf	: 1;
		u8 reserved_3	: 6;
		u8 owner_is_pf	: 1;
	};
}; /* HW DATA */

#define SMC_APERTURE_BITS 256
#define SMC_BASIC_UNIT (sizeof(u32))
#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8))
#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1)
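/* The shared memory aperture is 256 bits (8 dwords). The protocol header
 * occupies the last dword; its top bit (owner_is_pf, bit 31) is the
 * ownership flag polled in mana_smc_poll_register() below.
 */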

static int mana_smc_poll_register(void __iomem *base, bool reset)
{
	void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT;
	u32 last_dword;
	int i;

	/* Poll the hardware for the ownership bit. This should be pretty fast,
	 * but let's do it in a loop just in case the hardware or the PF
	 * driver are temporarily busy.
	 */
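	/* 20,000 iterations of usleep_range(1000, 2000) gives a worst-case
	 * wait of roughly 20-40 seconds before timing out.
	 */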
	for (i = 0; i < 20 * 1000; i++) {
		last_dword = readl(ptr);

		/* shmem reads as 0xFFFFFFFF in the reset case */
		if (reset && last_dword == SHMEM_VF_RESET_STATE)
			return 0;

		/* If bit_31 is set, the PF currently owns the SMC. */
		if (!(last_dword & BIT(31)))
			return 0;

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}

static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type,
				  u32 msg_version, bool reset_vf)
{
	void __iomem *base = sc->base;
	union smc_proto_hdr hdr;
	int err;

	/* Wait for PF to respond. */
	err = mana_smc_poll_register(base, reset_vf);
	if (err)
		return err;

	hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT);

	if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE)
		return 0;

	/* Validate protocol fields from the PF driver */
	if (hdr.msg_type != msg_type || hdr.msg_version > msg_version ||
	    hdr.direction != SMC_MSG_DIRECTION_RESPONSE) {
		dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n",
			hdr.as_uint32, msg_type, msg_version);
		return -EPROTO;
	}

	/* Validate the operation result */
	if (hdr.status != 0) {
		dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status);
		return -EPROTO;
	}

	return 0;
}

void mana_smc_init(struct shm_channel *sc, struct device *dev,
		   void __iomem *base)
{
	sc->dev = dev;
	sc->base = base;
}

int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
		       u64 cq_addr, u64 rq_addr, u64 sq_addr,
		       u32 eq_msix_index)
{
	union smc_proto_hdr *hdr;
	u16 all_addr_h4bits = 0;
	u16 frame_addr_seq = 0;
	u64 frame_addr = 0;
	u8 shm_buf[32];
	u64 *shmem;
	u32 *dword;
	u8 *ptr;
	int err;
	int i;

	/* Ensure VF already has possession of shared memory */
	err = mana_smc_poll_register(sc->base, false);
	if (err) {
		dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err);
		return err;
	}

	if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
	    !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
		return -EINVAL;

	if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
		return -EINVAL;

	/* Scheme for packing the four addresses and extra info into 256 bits.
	 *
	 * Addresses must be page frame aligned, so only the frame address
	 * bits are transferred.
	 *
	 * 52-bit frame addresses are split into the lower 48 bits and the
	 * upper 4 bits. The lower 48 bits of the 4 addresses are written
	 * sequentially from the start of the 256-bit shared memory region,
	 * followed by 16 bits containing the upper 4 bits of the 4 addresses
	 * in sequence.
	 *
	 * A 16-bit EQ vector number fills out the next-to-last 32-bit dword.
	 *
	 * The final 32-bit dword is used for protocol control information as
	 * defined in smc_proto_hdr. See the byte-offset map below.
	 */
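
	/* Resulting shm_buf layout (byte offsets):
	 *    0-5   EQ frame address, lower 48 bits
	 *    6-11  CQ frame address, lower 48 bits
	 *   12-17  RQ frame address, lower 48 bits
	 *   18-23  SQ frame address, lower 48 bits
	 *   24-25  upper 4 bits of each of the four frame addresses
	 *   26-27  EQ MSI-X vector number
	 *   28-31  protocol header (union smc_proto_hdr)
	 */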

	memset(shm_buf, 0, sizeof(shm_buf));
	ptr = shm_buf;

	/* EQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = PHYS_PFN(eq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* CQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = PHYS_PFN(cq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* RQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = PHYS_PFN(rq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* SQ addr: low 48 bits of frame address */
	shmem = (u64 *)ptr;
	frame_addr = PHYS_PFN(sq_addr);
	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

	/* High 4 bits of the four frame addresses */
	*((u16 *)ptr) = all_addr_h4bits;
	ptr += sizeof(u16);

	/* EQ MSIX vector number */
	*((u16 *)ptr) = (u16)eq_msix_index;
	ptr += sizeof(u16);

	/* 32-bit protocol header in final dword */
	*((u32 *)ptr) = 0;

	hdr = (union smc_proto_hdr *)ptr;
	hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC;
	hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION;
	hdr->direction = SMC_MSG_DIRECTION_REQUEST;
	hdr->reset_vf = reset_vf;

	/* Write the 256-bit message buffer to shared memory (the final 32-bit
	 * write triggers HW to set the possession bit to PF).
	 */
	dword = (u32 *)shm_buf;
	for (i = 0; i < SMC_APERTURE_DWORDS; i++)
		writel(*dword++, sc->base + i * SMC_BASIC_UNIT);

	/* Read the shmem response (polling for VF possession) and validate.
	 * For setup, waiting for the response on shared memory is not strictly
	 * necessary, since the wait occurs later for results to appear in
	 * EQEs.
	 */
	err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC,
				     SMC_MSG_TYPE_ESTABLISH_HWC_VERSION,
				     reset_vf);
	if (err) {
		dev_err(sc->dev, "Error when setting up HWC: %d\n", err);
		return err;
	}

	return 0;
}

int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf)
{
	union smc_proto_hdr hdr = {};
	int err;

	/* Ensure the VF already has possession of the shared memory */
	err = mana_smc_poll_register(sc->base, false);
	if (err) {
		dev_err(sc->dev, "Timeout when tearing down HWC\n");
		return err;
	}

	/* Set up protocol header for HWC destroy message */
	hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC;
	hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION;
	hdr.direction = SMC_MSG_DIRECTION_REQUEST;
	hdr.reset_vf = reset_vf;

	/* Write message in high 32 bits of 256-bit shared memory, causing HW
	 * to set possession bit to PF.
	 */
	writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT);

	/* Read shmem response (polling for VF possession) and validate.
	 * For teardown, waiting for response is required to ensure hardware
	 * invalidates MST entries before software frees memory.
	 */
	err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC,
				     SMC_MSG_TYPE_DESTROY_HWC_VERSION,
				     reset_vf);
	if (err) {
		dev_err(sc->dev, "Error when tearing down HWC: %d\n", err);
		return err;
	}

	return 0;
}