1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2020 Broadcom Limited
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  */
9 
10 #include <asm/byteorder.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/errno.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_ether.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/netdevice.h>
20 #include <linux/pci.h>
21 #include <linux/skbuff.h>
22 
23 #include "bnxt_hsi.h"
24 #include "bnxt.h"
25 #include "bnxt_hwrm.h"
26 
27 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
28 			    u16 cmpl_ring, u16 target_id)
29 {
30 	struct input *req = request;
31 
32 	req->req_type = cpu_to_le16(req_type);
33 	req->cmpl_ring = cpu_to_le16(cmpl_ring);
34 	req->target_id = cpu_to_le16(target_id);
35 	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
36 }
37 
38 static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
39 {
40 	return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
41 }
42 
43 /**
44  * __hwrm_req_init() - Initialize an HWRM request.
45  * @bp: The driver context.
46  * @req: A pointer to the request pointer to initialize.
 * @req_type: The request type. This will be converted to little endian
48  *	before being written to the req_type field of the returned request.
49  * @req_len: The length of the request to be allocated.
50  *
51  * Allocate DMA resources and initialize a new HWRM request object of the
52  * given type. The response address field in the request is configured with
 * the DMA bus address that has been mapped for the response, and the passed
 * request pointer is set to kernel virtual memory mapped for the request
 * (such that short_input indirection can be accomplished without copying).
 * The request's target and completion ring are initialized to default values
 * and can be overridden by writing to the returned request object directly.
58  *
59  * The initialized request can be further customized by writing to its fields
 * directly, taking care to convert such fields to little endian. The request
 * object will be consumed (and all its associated resources released) upon
62  * passing it to hwrm_req_send() unless ownership of the request has been
63  * claimed by the caller via a call to hwrm_req_hold(). If the request is not
64  * consumed, either because it is never sent or because ownership has been
65  * claimed, then it must be released by a call to hwrm_req_drop().
66  *
67  * Return: zero on success, negative error code otherwise:
68  *	E2BIG: the type of request pointer is too large to fit.
69  *	ENOMEM: an allocation failure occurred.
70  */
71 int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
72 {
73 	struct bnxt_hwrm_ctx *ctx;
74 	dma_addr_t dma_handle;
75 	u8 *req_addr;
76 
77 	if (req_len > BNXT_HWRM_CTX_OFFSET)
78 		return -E2BIG;
79 
80 	req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
81 				  &dma_handle);
82 	if (!req_addr)
83 		return -ENOMEM;
84 
85 	ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
86 	/* safety first, sentinel used to check for invalid requests */
87 	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
88 	ctx->req_len = req_len;
89 	ctx->req = (struct input *)req_addr;
90 	ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
91 	ctx->dma_handle = dma_handle;
92 	ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
93 	ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
94 	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
95 	ctx->gfp = GFP_KERNEL;
96 	ctx->slice_addr = NULL;
97 
98 	/* initialize common request fields */
99 	ctx->req->req_type = cpu_to_le16(req_type);
100 	ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
101 	ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
102 	ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
103 	*req = ctx->req;
104 
105 	return 0;
106 }
107 
108 static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
109 {
110 	void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
111 	struct input *req = (struct input *)req_addr;
112 	struct bnxt_hwrm_ctx *ctx = ctx_addr;
113 	u64 sentinel;
114 
115 	if (!req) {
116 		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "null HWRM request\n");
118 		dump_stack();
119 		return NULL;
120 	}
121 
122 	/* HWRM API has no type safety, verify sentinel to validate address */
123 	sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
124 	if (ctx->sentinel != sentinel) {
125 		/* can only be due to software bug, be loud */
126 		netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
127 			   (u32)le16_to_cpu(req->req_type));
128 		dump_stack();
129 		return NULL;
130 	}
131 
132 	return ctx;
133 }
134 
135 /**
136  * hwrm_req_timeout() - Set the completion timeout for the request.
137  * @bp: The driver context.
 * @req: The request for which the timeout will be set.
139  * @timeout: The timeout in milliseconds.
140  *
141  * Set the timeout associated with the request for subsequent calls to
142  * hwrm_req_send(). Some requests are long running and require a different
143  * timeout than the default.
144  */
145 void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
146 {
147 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
148 
149 	if (ctx)
150 		ctx->timeout = timeout;
151 }
152 
153 /**
154  * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
155  * @bp: The driver context.
156  * @req: The request for which calls to hwrm_req_dma_slice() will have altered
157  *	allocation flags.
 * @gfp: A bitmask of GFP flags. These flags are passed to
 *	dma_alloc_coherent() whenever it is used to allocate backing memory
 *	for slices. Note that calls to hwrm_req_dma_slice() will not always
 *	result in new allocations; however, memory suballocated from the
 *	request buffer is already __GFP_ZERO.
163  *
164  * Sets the GFP allocation flags associated with the request for subsequent
165  * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
166  * for slice allocations.
167  */
168 void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
169 {
170 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
171 
172 	if (ctx)
173 		ctx->gfp = gfp;
174 }
175 
176 /**
177  * hwrm_req_replace() - Replace request data.
178  * @bp: The driver context.
179  * @req: The request to modify. A call to hwrm_req_replace() is conceptually
180  *	an assignment of new_req to req. Subsequent calls to HWRM API functions,
181  *	such as hwrm_req_send(), should thus use req and not new_req (in fact,
182  *	calls to HWRM API functions will fail if non-managed request objects
183  *	are passed).
184  * @len: The length of new_req.
185  * @new_req: The pre-built request to copy or reference.
186  *
187  * Replaces the request data in req with that of new_req. This is useful in
188  * scenarios where a request object has already been constructed by a third
189  * party prior to creating a resource managed request using hwrm_req_init().
190  * Depending on the length, hwrm_req_replace() will either copy the new
191  * request data into the DMA memory allocated for req, or it will simply
192  * reference the new request and use it in lieu of req during subsequent
193  * calls to hwrm_req_send(). The resource management is associated with
194  * req and is independent of and does not apply to new_req. The caller must
 * ensure that the lifetime of new_req is at least as long as that of req.
 * Any slices that may have been associated with the original request are
 * released.
197  *
198  * Return: zero on success, negative error code otherwise:
199  *     E2BIG: Request is too large.
200  *     EINVAL: Invalid request to modify.
201  */
202 int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
203 {
204 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
205 	struct input *internal_req = req;
206 	u16 req_type;
207 
208 	if (!ctx)
209 		return -EINVAL;
210 
211 	if (len > BNXT_HWRM_CTX_OFFSET)
212 		return -E2BIG;
213 
214 	/* free any existing slices */
215 	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
216 	if (ctx->slice_addr) {
217 		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
218 				  ctx->slice_addr, ctx->slice_handle);
219 		ctx->slice_addr = NULL;
220 	}
221 	ctx->gfp = GFP_KERNEL;
222 
223 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
224 		memcpy(internal_req, new_req, len);
225 	} else {
226 		internal_req->req_type = ((struct input *)new_req)->req_type;
227 		ctx->req = new_req;
228 	}
229 
230 	ctx->req_len = len;
231 	ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
232 					  BNXT_HWRM_RESP_OFFSET);
233 
234 	/* update sentinel for potentially new request type */
235 	req_type = le16_to_cpu(internal_req->req_type);
236 	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
237 
238 	return 0;
239 }
240 
241 /**
 * hwrm_req_flags() - Set non-internal flags of the ctx
243  * @bp: The driver context.
244  * @req: The request containing the HWRM command
245  * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set
246  *
 * ctx flags can be used by the callers to instruct how the subsequent
 * hwrm_req_send() should behave. For example, callers can use hwrm_req_flags()
 * with BNXT_HWRM_CTX_SILENT to omit kernel error logging from hwrm_req_send(),
 * or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full
 * timeout even if FW is not responding.
252  * This generic function can be used to set any flag that is not an internal flag
253  * of the HWRM module.
254  */
255 void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
256 {
257 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
258 
259 	if (ctx)
260 		ctx->flags |= (flags & HWRM_API_FLAGS);
261 }
262 
263 /**
264  * hwrm_req_hold() - Claim ownership of the request's resources.
265  * @bp: The driver context.
266  * @req: A pointer to the request to own. The request will no longer be
267  *	consumed by calls to hwrm_req_send().
268  *
269  * Take ownership of the request. Ownership places responsibility on the
270  * caller to free the resources associated with the request via a call to
271  * hwrm_req_drop(). The caller taking ownership implies that a subsequent
272  * call to hwrm_req_send() will not consume the request (ie. sending will
273  * not free the associated resources if the request is owned by the caller).
274  * Taking ownership returns a reference to the response. Retaining and
275  * accessing the response data is the most common reason to take ownership
276  * of the request. Ownership can also be acquired in order to reuse the same
277  * request object across multiple invocations of hwrm_req_send().
278  *
279  * Return: A pointer to the response object.
280  *
281  * The resources associated with the response will remain available to the
282  * caller until ownership of the request is relinquished via a call to
283  * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
284  * a valid request is provided. A returned NULL value would imply a driver
285  * bug and the implementation will complain loudly in the logs to aid in
286  * detection. It should not be necessary to check the result for NULL.
287  */
288 void *hwrm_req_hold(struct bnxt *bp, void *req)
289 {
290 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
291 	struct input *input = (struct input *)req;
292 
293 	if (!ctx)
294 		return NULL;
295 
296 	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
297 		/* can only be due to software bug, be loud */
298 		netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
299 			   (u32)le16_to_cpu(input->req_type));
300 		dump_stack();
301 		return NULL;
302 	}
303 
304 	ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
305 	return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
306 }
307 
308 static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
309 {
310 	void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
311 	dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
312 
313 	/* unmap any auxiliary DMA slice */
314 	if (ctx->slice_addr)
315 		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
316 				  ctx->slice_addr, ctx->slice_handle);
317 
318 	/* invalidate, ensure ownership, sentinel and dma_handle are cleared */
319 	memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));
320 
321 	/* return the buffer to the DMA pool */
322 	if (dma_handle)
323 		dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
324 }
325 
326 /**
327  * hwrm_req_drop() - Release all resources associated with the request.
328  * @bp: The driver context.
329  * @req: The request to consume, releasing the associated resources. The
330  *	request object, any slices, and its associated response are no
331  *	longer valid.
332  *
333  * It is legal to call hwrm_req_drop() on an unowned request, provided it
334  * has not already been consumed by hwrm_req_send() (for example, to release
335  * an aborted request). A given request should not be dropped more than once,
336  * nor should it be dropped after having been consumed by hwrm_req_send(). To
337  * do so is an error (the context will not be found and a stack trace will be
338  * rendered in the kernel log).
339  */
340 void hwrm_req_drop(struct bnxt *bp, void *req)
341 {
342 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
343 
344 	if (ctx)
345 		__hwrm_ctx_drop(bp, ctx);
346 }
347 
348 static int __hwrm_to_stderr(u32 hwrm_err)
349 {
350 	switch (hwrm_err) {
351 	case HWRM_ERR_CODE_SUCCESS:
352 		return 0;
353 	case HWRM_ERR_CODE_RESOURCE_LOCKED:
354 		return -EROFS;
355 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
356 		return -EACCES;
357 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
358 		return -ENOSPC;
359 	case HWRM_ERR_CODE_INVALID_PARAMS:
360 	case HWRM_ERR_CODE_INVALID_FLAGS:
361 	case HWRM_ERR_CODE_INVALID_ENABLES:
362 	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
363 	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
364 		return -EINVAL;
365 	case HWRM_ERR_CODE_NO_BUFFER:
366 		return -ENOMEM;
367 	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
368 	case HWRM_ERR_CODE_BUSY:
369 		return -EAGAIN;
370 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
371 		return -EOPNOTSUPP;
372 	default:
373 		return -EIO;
374 	}
375 }
376 
377 static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
378 {
379 	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
380 	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
381 	struct hwrm_short_input short_input = {0};
382 	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
383 	unsigned int i, timeout, tmo_count;
384 	u16 dst = BNXT_HWRM_CHNL_CHIMP;
385 	int intr_process, rc = -EBUSY;
386 	u32 *data = (u32 *)ctx->req;
387 	u32 msg_len = ctx->req_len;
388 	u16 cp_ring_id, len = 0;
389 	u32 req_type;
390 	u8 *valid;
391 
392 	if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
393 		memset(ctx->resp, 0, PAGE_SIZE);
394 
395 	req_type = le16_to_cpu(ctx->req->req_type);
396 	if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
397 		goto exit;
398 
399 	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
400 	    msg_len > bp->hwrm_max_ext_req_len) {
401 		rc = -E2BIG;
402 		goto exit;
403 	}
404 
405 	if (bnxt_kong_hwrm_message(bp, ctx->req)) {
406 		dst = BNXT_HWRM_CHNL_KONG;
407 		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
408 		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
409 		if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
410 			netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
411 				   req_type);
412 			rc = -EINVAL;
413 			goto exit;
414 		}
415 	}
416 
417 	cp_ring_id = le16_to_cpu(ctx->req->cmpl_ring);
418 	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
419 
420 	ctx->req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
421 	/* currently supports only one outstanding message */
422 	if (intr_process)
423 		bp->hwrm_intr_seq_id = le16_to_cpu(ctx->req->seq_id);
424 
425 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
426 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
427 		short_input.req_type = ctx->req->req_type;
428 		short_input.signature =
429 				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
430 		short_input.size = cpu_to_le16(msg_len);
431 		short_input.req_addr = cpu_to_le64(ctx->dma_handle);
432 
433 		data = (u32 *)&short_input;
434 		msg_len = sizeof(short_input);
435 
436 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
437 	}
438 
439 	/* Ensure any associated DMA buffers are written before doorbell */
440 	wmb();
441 
442 	/* Write request msg to hwrm channel */
443 	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
444 
445 	for (i = msg_len; i < max_req_len; i += 4)
446 		writel(0, bp->bar0 + bar_offset + i);
447 
448 	/* Ring channel doorbell */
449 	writel(1, bp->bar0 + doorbell_offset);
450 
451 	if (!pci_is_enabled(bp->pdev)) {
452 		rc = -ENODEV;
453 		goto exit;
454 	}
455 
456 	/* Limit timeout to an upper limit */
457 	timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
458 	/* convert timeout to usec */
459 	timeout *= 1000;
460 
461 	i = 0;
462 	/* Short timeout for the first few iterations:
463 	 * number of loops = number of loops for short timeout +
464 	 * number of loops for standard timeout.
465 	 */
466 	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
467 	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
468 	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
469 
470 	if (intr_process) {
471 		u16 seq_id = bp->hwrm_intr_seq_id;
472 
473 		/* Wait until hwrm response cmpl interrupt is processed */
474 		while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
475 		       i++ < tmo_count) {
476 			/* Abort the wait for completion if the FW health
477 			 * check has failed.
478 			 */
479 			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
480 				goto exit;
481 			/* on first few passes, just barely sleep */
482 			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
483 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
484 					     HWRM_SHORT_MAX_TIMEOUT);
485 			} else {
486 				if (HWRM_WAIT_MUST_ABORT(bp, ctx))
487 					break;
488 				usleep_range(HWRM_MIN_TIMEOUT,
489 					     HWRM_MAX_TIMEOUT);
490 			}
491 		}
492 
493 		if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
494 			if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
495 				netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
496 					   le16_to_cpu(ctx->req->req_type));
497 			goto exit;
498 		}
499 		len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
500 		valid = ((u8 *)ctx->resp) + len - 1;
501 	} else {
502 		__le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
503 		int j;
504 
505 		/* Check if response len is updated */
506 		for (i = 0; i < tmo_count; i++) {
507 			/* Abort the wait for completion if the FW health
508 			 * check has failed.
509 			 */
510 			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
511 				goto exit;
512 			len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
513 			if (len) {
514 				__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
515 
516 				if (resp_seq == ctx->req->seq_id)
517 					break;
518 				if (resp_seq != seen_out_of_seq) {
519 					netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
520 						    le16_to_cpu(resp_seq),
521 						    le16_to_cpu(ctx->req->req_type),
522 						    le16_to_cpu(ctx->req->seq_id));
523 					seen_out_of_seq = resp_seq;
524 				}
525 			}
526 
527 			/* on first few passes, just barely sleep */
528 			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
529 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
530 					     HWRM_SHORT_MAX_TIMEOUT);
531 			} else {
532 				if (HWRM_WAIT_MUST_ABORT(bp, ctx))
533 					goto timeout_abort;
534 				usleep_range(HWRM_MIN_TIMEOUT,
535 					     HWRM_MAX_TIMEOUT);
536 			}
537 		}
538 
539 		if (i >= tmo_count) {
540 timeout_abort:
541 			if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
542 				netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
543 					   hwrm_total_timeout(i),
544 					   le16_to_cpu(ctx->req->req_type),
545 					   le16_to_cpu(ctx->req->seq_id), len);
546 			goto exit;
547 		}
548 
549 		/* Last byte of resp contains valid bit */
550 		valid = ((u8 *)ctx->resp) + len - 1;
551 		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
552 			/* make sure we read from updated DMA memory */
553 			dma_rmb();
554 			if (*valid)
555 				break;
556 			usleep_range(1, 5);
557 		}
558 
559 		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
560 			if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
561 				netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
562 					   hwrm_total_timeout(i),
563 					   le16_to_cpu(ctx->req->req_type),
564 					   le16_to_cpu(ctx->req->seq_id), len,
565 					   *valid);
566 			goto exit;
567 		}
568 	}
569 
570 	/* Zero valid bit for compatibility.  Valid bit in an older spec
571 	 * may become a new field in a newer spec.  We must make sure that
572 	 * a new field not implemented by old spec will read zero.
573 	 */
574 	*valid = 0;
575 	rc = le16_to_cpu(ctx->resp->error_code);
576 	if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
577 		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
578 			   le16_to_cpu(ctx->resp->req_type),
579 			   le16_to_cpu(ctx->resp->seq_id), rc);
580 	}
581 	rc = __hwrm_to_stderr(rc);
582 exit:
583 	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
584 		ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
585 	else
586 		__hwrm_ctx_drop(bp, ctx);
587 	return rc;
588 }
589 
590 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
591 				 int timeout, bool silent)
592 {
593 	struct bnxt_hwrm_ctx default_ctx = {0};
594 	struct bnxt_hwrm_ctx *ctx = &default_ctx;
595 	struct input *req = msg;
596 	int rc;
597 
598 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
599 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
600 		rc = __hwrm_req_init(bp, (void **)&req,
601 				     le16_to_cpu(req->req_type), msg_len);
602 		if (rc)
603 			return rc;
604 		memcpy(req, msg, msg_len); /* also copies resp_addr */
605 		ctx = __hwrm_ctx(bp, (u8 *)req);
		/* belts and braces, NULL ctx shouldn't be possible here */
607 		if (!ctx)
608 			return -ENOMEM;
609 	}
610 
611 	ctx->req = req;
612 	ctx->req_len = msg_len;
613 	ctx->resp = bp->hwrm_cmd_resp_addr;
614 	/* global response is not reallocated __GFP_ZERO between requests */
615 	ctx->flags = BNXT_HWRM_INTERNAL_RESP_DIRTY;
616 	ctx->timeout = timeout ?: DFLT_HWRM_CMD_TIMEOUT;
617 	if (silent)
618 		ctx->flags |= BNXT_HWRM_CTX_SILENT;
619 
620 	/* will consume req if allocated with __hwrm_req_init() */
621 	return __hwrm_send(bp, ctx);
622 }
623 
624 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
625 {
626 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
627 }
628 
629 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
630 			      int timeout)
631 {
632 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
633 }
634 
635 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
636 {
637 	int rc;
638 
639 	mutex_lock(&bp->hwrm_cmd_lock);
640 	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
641 	mutex_unlock(&bp->hwrm_cmd_lock);
642 	return rc;
643 }
644 
645 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
646 			     int timeout)
647 {
648 	int rc;
649 
650 	mutex_lock(&bp->hwrm_cmd_lock);
651 	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
652 	mutex_unlock(&bp->hwrm_cmd_lock);
653 	return rc;
654 }
655 
656 /**
657  * hwrm_req_send() - Execute an HWRM command.
658  * @bp: The driver context.
659  * @req: A pointer to the request to send. The DMA resources associated with
660  *	the request will be released (ie. the request will be consumed) unless
661  *	ownership of the request has been assumed by the caller via a call to
662  *	hwrm_req_hold().
663  *
664  * Send an HWRM request to the device and wait for a response. The request is
665  * consumed if it is not owned by the caller. This function will block until
666  * the request has either completed or times out due to an error.
667  *
668  * Return: A result code.
669  *
670  * The result is zero on success, otherwise the negative error code indicates
671  * one of the following errors:
672  *	E2BIG: The request was too large.
673  *	EBUSY: The firmware is in a fatal state or the request timed out
674  *	EACCESS: HWRM access denied.
675  *	ENOSPC: HWRM resource allocation error.
676  *	EINVAL: Request parameters are invalid.
677  *	ENOMEM: HWRM has no buffers.
678  *	EAGAIN: HWRM busy or reset in progress.
679  *	EOPNOTSUPP: Invalid request type.
680  *	EIO: Any other error.
681  * Error handling is orthogonal to request ownership. An unowned request will
682  * still be consumed on error. If the caller owns the request, then the caller
683  * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
684  * always consume the request.
685  */
686 int hwrm_req_send(struct bnxt *bp, void *req)
687 {
688 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
689 	int rc;
690 
691 	if (!ctx)
692 		return -EINVAL;
693 
694 	mutex_lock(&bp->hwrm_cmd_lock);
695 	rc = __hwrm_send(bp, ctx);
696 	mutex_unlock(&bp->hwrm_cmd_lock);
697 	return rc;
698 }
699 
700 /**
701  * hwrm_req_send_silent() - A silent version of hwrm_req_send().
702  * @bp: The driver context.
703  * @req: The request to send without logging.
704  *
 * The same as hwrm_req_send(), except that the request is silenced using
 * hwrm_req_flags() with BNXT_HWRM_CTX_SILENT prior to the call. This version
 * of the function is provided solely to preserve the legacy API's flavor for
 * this functionality.
708  *
709  * Return: A result code, see hwrm_req_send().
710  */
711 int hwrm_req_send_silent(struct bnxt *bp, void *req)
712 {
713 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
714 	return hwrm_req_send(bp, req);
715 }
716 
717 /**
718  * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
719  * @bp: The driver context.
720  * @req: The request for which indirect data will be associated.
721  * @size: The size of the allocation.
 * @dma_handle: The bus address associated with the allocation. The HWRM API
 *	has no knowledge about the type of the request and so cannot infer how
 *	the caller intends to use the indirect data. Thus, the caller is
 *	responsible for configuring the request object appropriately to
 *	point to the associated indirect memory. Note that the DMA handle has
 *	the same definition as it does in dma_alloc_coherent(); the caller is
 *	responsible for endian conversion via cpu_to_le64() before assigning
 *	this address to a request field.
730  *
731  * Allocates DMA mapped memory for indirect data related to a request. The
732  * lifetime of the DMA resources will be bound to that of the request (ie.
733  * they will be automatically released when the request is either consumed by
734  * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
735  * efficiently suballocated out of the request buffer space, hence the name
736  * slice, while larger requests are satisfied via an underlying call to
 * dma_alloc_coherent(). Multiple suballocations are supported; however, only
738  * one externally mapped region is.
739  *
740  * Return: The kernel virtual address of the DMA mapping.
741  */
742 void *
743 hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
744 {
745 	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
746 	u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
747 	struct input *input = req;
748 	u8 *addr, *req_addr = req;
749 	u32 max_offset, offset;
750 
751 	if (!ctx)
752 		return NULL;
753 
754 	max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
755 	offset = max_offset - size;
756 	offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
757 	addr = req_addr + offset;
758 
759 	if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
760 		ctx->allocated = end - addr;
761 		*dma_handle = ctx->dma_handle + offset;
762 		return addr;
763 	}
764 
765 	/* could not suballocate from ctx buffer, try create a new mapping */
766 	if (ctx->slice_addr) {
767 		/* if one exists, can only be due to software bug, be loud */
768 		netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
769 			   (u32)le16_to_cpu(input->req_type));
770 		dump_stack();
771 		return NULL;
772 	}
773 
774 	addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);
775 
776 	if (!addr)
777 		return NULL;
778 
779 	ctx->slice_addr = addr;
780 	ctx->slice_size = size;
781 	ctx->slice_handle = *dma_handle;
782 
783 	return addr;
784 }
785