1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of version 2 of the GNU General Public License as
13  * published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23  * USA
24  *
25  * The full GNU General Public License is included in this distribution
26  * in the file called COPYING.
27  *
28  * Contact Information:
29  *  Intel Linux Wireless <linuxwifi@intel.com>
30  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31  *
32  * BSD LICENSE
33  *
34  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  *
42  *  * Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  *  * Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in
46  *    the documentation and/or other materials provided with the
47  *    distribution.
48  *  * Neither the name Intel Corporation nor the names of its
49  *    contributors may be used to endorse or promote products derived
50  *    from this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63  *
64  *****************************************************************************/
65 #ifndef __iwl_trans_h__
66 #define __iwl_trans_h__
67 
68 #include <linux/ieee80211.h>
69 #include <linux/mm.h> /* for page_address */
70 #include <linux/lockdep.h>
71 #include <linux/kernel.h>
72 
73 #include "iwl-debug.h"
74 #include "iwl-config.h"
75 #include "iwl-fw.h"
76 #include "iwl-op-mode.h"
77 
78 /**
79  * DOC: Transport layer - what is it ?
80  *
81  * The transport layer is the layer that deals with the HW directly. It provides
82  * an abstraction of the underlying HW to the upper layer. The transport layer
83  * doesn't provide any policy, algorithm or anything of this kind, but only
84  * mechanisms to make the HW do something. It is not completely stateless but
85  * close to it.
86  * We will have an implementation for each different supported bus.
87  */
88 
89 /**
90  * DOC: Life cycle of the transport layer
91  *
92  * The transport layer has a very precise life cycle.
93  *
94  *	1) A helper function is called during the module initialization and
95  *	   registers the bus driver's ops with the transport's alloc function.
96  *	2) Bus's probe calls to the transport layer's allocation functions.
97  *	   Of course this function is bus specific.
98  *	3) This allocation functions will spawn the upper layer which will
99  *	   register mac80211.
100  *
101  *	4) At some point (i.e. mac80211's start call), the op_mode will call
102  *	   the following sequence:
103  *	   start_hw
104  *	   start_fw
105  *
106  *	5) Then when finished (or reset):
107  *	   stop_device
108  *
109  *	6) Eventually, the free function will be called.
110  */
111 
112 /**
113  * DOC: Host command section
114  *
115  * A host command is a command issued by the upper layer to the fw. There are
116  * several versions of fw that have several APIs. The transport layer is
117  * completely agnostic to these differences.
118  * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
119  */
120 #define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
121 #define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
122 #define SEQ_TO_INDEX(s)	((s) & 0xff)
123 #define INDEX_TO_SEQ(i)	((i) & 0xff)
124 #define SEQ_RX_FRAME	cpu_to_le16(0x8000)
125 
126 /*
127  * those functions retrieve specific information from
128  * the id field in the iwl_host_cmd struct which contains
129  * the command id, the group id and the version of the command
130  * and vice versa
131 */
132 static inline u8 iwl_cmd_opcode(u32 cmdid)
133 {
134 	return cmdid & 0xFF;
135 }
136 
137 static inline u8 iwl_cmd_groupid(u32 cmdid)
138 {
139 	return ((cmdid & 0xFF00) >> 8);
140 }
141 
142 static inline u8 iwl_cmd_version(u32 cmdid)
143 {
144 	return ((cmdid & 0xFF0000) >> 16);
145 }
146 
147 static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
148 {
149 	return opcode + (groupid << 8) + (version << 16);
150 }
151 
152 /* make u16 wide id out of u8 group and opcode */
153 #define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
154 
155 /* due to the conversion, this group is special; new groups
156  * should be defined in the appropriate fw-api header files
157  */
158 #define IWL_ALWAYS_LONG_GROUP	1
159 
160 /**
161  * struct iwl_cmd_header
162  *
163  * This header format appears in the beginning of each command sent from the
164  * driver, and each response/notification received from uCode.
165  */
166 struct iwl_cmd_header {
167 	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
168 	u8 group_id;
169 	/*
170 	 * The driver sets up the sequence number to values of its choosing.
171 	 * uCode does not use this value, but passes it back to the driver
172 	 * when sending the response to each driver-originated command, so
173 	 * the driver can match the response to the command.  Since the values
174 	 * don't get used by uCode, the driver may set up an arbitrary format.
175 	 *
176 	 * There is one exception:  uCode sets bit 15 when it originates
177 	 * the response/notification, i.e. when the response/notification
178 	 * is not a direct response to a command sent by the driver.  For
179 	 * example, uCode issues REPLY_RX when it sends a received frame
180 	 * to the driver; it is not a direct response to any driver command.
181 	 *
182 	 * The Linux driver uses the following format:
183 	 *
184 	 *  0:7		tfd index - position within TX queue
185 	 *  8:12	TX queue id
186 	 *  13:14	reserved
187 	 *  15		unsolicited RX or uCode-originated notification
188 	 */
189 	__le16 sequence;
190 } __packed;
191 
192 /**
193  * struct iwl_cmd_header_wide
194  *
195  * This header format appears in the beginning of each command sent from the
196  * driver, and each response/notification received from uCode.
197  * this is the wide version that contains more information about the command
198  * like length, version and command type
199  */
200 struct iwl_cmd_header_wide {
201 	u8 cmd;
202 	u8 group_id;
203 	__le16 sequence;
204 	__le16 length;
205 	u8 reserved;
206 	u8 version;
207 } __packed;
208 
209 #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
210 #define FH_RSCSR_FRAME_INVALID		0x55550000
211 #define FH_RSCSR_FRAME_ALIGN		0x40
212 
213 struct iwl_rx_packet {
214 	/*
215 	 * The first 4 bytes of the RX frame header contain both the RX frame
216 	 * size and some flags.
217 	 * Bit fields:
218 	 * 31:    flag flush RB request
219 	 * 30:    flag ignore TC (terminal counter) request
220 	 * 29:    flag fast IRQ request
221 	 * 28-14: Reserved
222 	 * 13-00: RX frame size
223 	 */
224 	__le32 len_n_flags;
225 	struct iwl_cmd_header hdr;
226 	u8 data[];
227 } __packed;
228 
229 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
230 {
231 	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
232 }
233 
234 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
235 {
236 	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
237 }
238 
239 /**
240  * enum CMD_MODE - how to send the host commands ?
241  *
242  * @CMD_ASYNC: Return right away and don't wait for the response
243  * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
244  *	the response. The caller needs to call iwl_free_resp when done.
245  * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
246  *	command queue, but after other high priority commands. Valid only
247  *	with CMD_ASYNC.
248  * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
249  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
250  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
251  *	(i.e. mark it as non-idle).
252  * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
253  *	called after this command completes. Valid only with CMD_ASYNC.
254  * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
255  *	check that we leave enough room for the TBs bitmap which needs 20 bits.
256  */
257 enum CMD_MODE {
258 	CMD_ASYNC		= BIT(0),
259 	CMD_WANT_SKB		= BIT(1),
260 	CMD_SEND_IN_RFKILL	= BIT(2),
261 	CMD_HIGH_PRIO		= BIT(3),
262 	CMD_SEND_IN_IDLE	= BIT(4),
263 	CMD_MAKE_TRANS_IDLE	= BIT(5),
264 	CMD_WAKE_UP_TRANS	= BIT(6),
265 	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
266 
267 	CMD_TB_BITMAP_POS	= 11,
268 };
269 
270 #define DEF_CMD_PAYLOAD_SIZE 320
271 
272 /**
273  * struct iwl_device_cmd
274  *
275  * For allocation of the command and tx queues, this establishes the overall
276  * size of the largest command we send to uCode, except for commands that
277  * aren't fully copied and use other TFD space.
278  */
279 struct iwl_device_cmd {
280 	union {
281 		struct {
282 			struct iwl_cmd_header hdr;	/* uCode API */
283 			u8 payload[DEF_CMD_PAYLOAD_SIZE];
284 		};
285 		struct {
286 			struct iwl_cmd_header_wide hdr_wide;
287 			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
288 					sizeof(struct iwl_cmd_header_wide) +
289 					sizeof(struct iwl_cmd_header)];
290 		};
291 	};
292 } __packed;
293 
294 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
295 
296 /*
297  * number of transfer buffers (fragments) per transmit frame descriptor;
298  * this is just the driver's idea, the hardware supports 20
299  */
300 #define IWL_MAX_CMD_TBS_PER_TFD	2
301 
302 /**
303  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
304  *
305  * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
306  *	ring. The transport layer doesn't map the command's buffer to DMA, but
307  *	rather copies it to a previously allocated DMA buffer. This flag tells
308  *	the transport layer not to copy the command, but to map the existing
309  *	buffer (that is passed in) instead. This saves the memcpy and allows
310  *	commands that are bigger than the fixed buffer to be submitted.
311  *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
312  * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
313  *	chunk internally and free it again after the command completes. This
314  *	can (currently) be used only once per command.
315  *	Note that a TFD entry after a DUP one cannot be a normal copied one.
316  */
317 enum iwl_hcmd_dataflag {
318 	IWL_HCMD_DFL_NOCOPY	= BIT(0),
319 	IWL_HCMD_DFL_DUP	= BIT(1),
320 };
321 
322 /**
323  * struct iwl_host_cmd - Host command to the uCode
324  *
325  * @data: array of chunks that composes the data of the host command
326  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
327  * @_rx_page_order: (internally used to free response packet)
328  * @_rx_page_addr: (internally used to free response packet)
329  * @flags: can be CMD_*
330  * @len: array of the lengths of the chunks in data
331  * @dataflags: IWL_HCMD_DFL_*
332  * @id: command id of the host command, for wide commands encoding the
333  *	version and group as well
334  */
335 struct iwl_host_cmd {
336 	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
337 	struct iwl_rx_packet *resp_pkt;
338 	unsigned long _rx_page_addr;
339 	u32 _rx_page_order;
340 
341 	u32 flags;
342 	u32 id;
343 	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
344 	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
345 };
346 
347 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
348 {
349 	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
350 }
351 
352 struct iwl_rx_cmd_buffer {
353 	struct page *_page;
354 	int _offset;
355 	bool _page_stolen;
356 	u32 _rx_page_order;
357 	unsigned int truesize;
358 };
359 
360 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
361 {
362 	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
363 }
364 
365 static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
366 {
367 	return r->_offset;
368 }
369 
370 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
371 {
372 	r->_page_stolen = true;
373 	get_page(r->_page);
374 	return r->_page;
375 }
376 
377 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
378 {
379 	__free_pages(r->_page, r->_rx_page_order);
380 }
381 
382 #define MAX_NO_RECLAIM_CMDS	6
383 
384 /*
385  * The first entry in driver_data array in ieee80211_tx_info
386  * that can be used by the transport.
387  */
388 #define IWL_TRANS_FIRST_DRIVER_DATA 2
389 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
390 
391 /*
392  * Maximum number of HW queues the transport layer
393  * currently supports
394  */
395 #define IWL_MAX_HW_QUEUES		32
396 #define IWL_MAX_TID_COUNT	8
397 #define IWL_FRAME_LIMIT	64
398 #define IWL_MAX_RX_HW_QUEUES	16
399 
400 /**
401  * enum iwl_wowlan_status - WoWLAN image/device status
402  * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
403  * @IWL_D3_STATUS_RESET: device was reset while suspended
404  */
405 enum iwl_d3_status {
406 	IWL_D3_STATUS_ALIVE,
407 	IWL_D3_STATUS_RESET,
408 };
409 
410 /**
411  * enum iwl_trans_status: transport status flags
412  * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
413  * @STATUS_DEVICE_ENABLED: APM is enabled
414  * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
415  * @STATUS_INT_ENABLED: interrupts are enabled
416  * @STATUS_RFKILL: the HW RFkill switch is in KILL position
417  * @STATUS_FW_ERROR: the fw is in error state
418  * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
419  *	are sent
420  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
421  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
422  */
423 enum iwl_trans_status {
424 	STATUS_SYNC_HCMD_ACTIVE,
425 	STATUS_DEVICE_ENABLED,
426 	STATUS_TPOWER_PMI,
427 	STATUS_INT_ENABLED,
428 	STATUS_RFKILL,
429 	STATUS_FW_ERROR,
430 	STATUS_TRANS_GOING_IDLE,
431 	STATUS_TRANS_IDLE,
432 	STATUS_TRANS_DEAD,
433 };
434 
435 static inline int
436 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
437 {
438 	switch (rb_size) {
439 	case IWL_AMSDU_4K:
440 		return get_order(4 * 1024);
441 	case IWL_AMSDU_8K:
442 		return get_order(8 * 1024);
443 	case IWL_AMSDU_12K:
444 		return get_order(12 * 1024);
445 	default:
446 		WARN_ON(1);
447 		return -1;
448 	}
449 }
450 
451 struct iwl_hcmd_names {
452 	u8 cmd_id;
453 	const char *const cmd_name;
454 };
455 
456 #define HCMD_NAME(x)	\
457 	{ .cmd_id = x, .cmd_name = #x }
458 
459 struct iwl_hcmd_arr {
460 	const struct iwl_hcmd_names *arr;
461 	int size;
462 };
463 
464 #define HCMD_ARR(x)	\
465 	{ .arr = x, .size = ARRAY_SIZE(x) }
466 
467 /**
468  * struct iwl_trans_config - transport configuration
469  *
470  * @op_mode: pointer to the upper layer.
471  * @cmd_queue: the index of the command queue.
472  *	Must be set before start_fw.
473  * @cmd_fifo: the fifo for host commands
474  * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
475  * @no_reclaim_cmds: Some devices erroneously don't set the
476  *	SEQ_RX_FRAME bit on some notifications, this is the
477  *	list of such notifications to filter. Max length is
478  *	%MAX_NO_RECLAIM_CMDS.
479  * @n_no_reclaim_cmds: # of commands in list
480  * @rx_buf_size: RX buffer size needed for A-MSDUs
481  *	if unset 4k will be the RX buffer size
482  * @bc_table_dword: set to true if the BC table expects the byte count to be
483  *	in DWORD (as opposed to bytes)
484  * @scd_set_active: should the transport configure the SCD for HCMD queue
485  * @wide_cmd_header: firmware supports wide host command header
486  * @sw_csum_tx: transport should compute the TCP checksum
487  * @command_groups: array of command groups, each member is an array of the
488  *	commands in the group; for debugging only
489  * @command_groups_size: number of command groups, to avoid illegal access
490  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
491  *	we get the ALIVE from the uCode
492  */
493 struct iwl_trans_config {
494 	struct iwl_op_mode *op_mode;
495 
496 	u8 cmd_queue;
497 	u8 cmd_fifo;
498 	unsigned int cmd_q_wdg_timeout;
499 	const u8 *no_reclaim_cmds;
500 	unsigned int n_no_reclaim_cmds;
501 
502 	enum iwl_amsdu_size rx_buf_size;
503 	bool bc_table_dword;
504 	bool scd_set_active;
505 	bool wide_cmd_header;
506 	bool sw_csum_tx;
507 	const struct iwl_hcmd_arr *command_groups;
508 	int command_groups_size;
509 
510 	u32 sdio_adma_addr;
511 };
512 
513 struct iwl_trans_dump_data {
514 	u32 len;
515 	u8 data[];
516 };
517 
518 struct iwl_trans;
519 
520 struct iwl_trans_txq_scd_cfg {
521 	u8 fifo;
522 	s8 sta_id;
523 	u8 tid;
524 	bool aggregate;
525 	int frame_limit;
526 };
527 
528 /**
529  * struct iwl_trans_ops - transport specific operations
530  *
531  * All the handlers MUST be implemented
532  *
533  * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
534  *	out of a low power state. From that point on, the HW can send
535  *	interrupts. May sleep.
536  * @op_mode_leave: Turn off the HW RF kill indication if on
537  *	May sleep
538  * @start_fw: allocates and inits all the resources for the transport
539  *	layer. Also kick a fw image.
540  *	May sleep
541  * @fw_alive: called when the fw sends alive notification. If the fw provides
542  *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
543  *	May sleep
544  * @stop_device: stops the whole device (embedded CPU put to reset) and stops
545  *	the HW. If low_power is true, the NIC will be put in low power state.
546  *	From that point on, the HW will be stopped but will still issue an
547  *	interrupt if the HW RF kill switch is triggered.
548  *	This callback must do the right thing and not crash even if %start_hw()
549  *	was called but not &start_fw(). May sleep.
550  * @d3_suspend: put the device into the correct mode for WoWLAN during
551  *	suspend. This is optional, if not implemented WoWLAN will not be
552  *	supported. This callback may sleep.
553  * @d3_resume: resume the device after WoWLAN, enabling the opmode to
554  *	talk to the WoWLAN image to get its status. This is optional, if not
555  *	implemented WoWLAN will not be supported. This callback may sleep.
556  * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
557  *	If RFkill is asserted in the middle of a SYNC host command, it must
558  *	return -ERFKILL straight away.
559  *	May sleep only if CMD_ASYNC is not set
560  * @tx: send an skb. The transport relies on the op_mode to zero the
561  *	the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
562  *	the CSUM will be taken care of (TCP CSUM and IP header in case of
563  *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
564  *	header if it is IPv4.
565  *	Must be atomic
566  * @reclaim: free packet until ssn. Returns a list of freed packets.
567  *	Must be atomic
568  * @txq_enable: setup a queue. To setup an AC queue, use the
569  *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
570  *	this one. The op_mode must not configure the HCMD queue. The scheduler
571  *	configuration may be %NULL, in which case the hardware will not be
572  *	configured. May sleep.
573  * @txq_disable: de-configure a Tx queue to send AMPDUs
574  *	Must be atomic
575  * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
576  * @freeze_txq_timer: prevents the timer of the queue from firing until the
577  *	queue is set to awake. Must be atomic.
578  * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
579  *	that the transport needs to refcount the calls since this function
580  *	will be called several times with block = true, and then the queues
581  *	need to be unblocked only after the same number of calls with
582  *	block = false.
583  * @write8: write a u8 to a register at offset ofs from the BAR
584  * @write32: write a u32 to a register at offset ofs from the BAR
585  * @read32: read a u32 register at offset ofs from the BAR
586  * @read_prph: read a DWORD from a periphery register
587  * @write_prph: write a DWORD to a periphery register
588  * @read_mem: read device's SRAM in DWORD
589  * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
590  *	will be zeroed.
591  * @configure: configure parameters required by the transport layer from
592  *	the op_mode. May be called several times before start_fw, can't be
593  *	called after that.
594  * @set_pmi: set the power pmi state
595  * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
596  *	Sleeping is not allowed between grab_nic_access and
597  *	release_nic_access.
598  * @release_nic_access: let the NIC go to sleep. The "flags" parameter
599  *	must be the same one that was sent before to the grab_nic_access.
600  * @set_bits_mask - set SRAM register according to value and mask.
601  * @ref: grab a reference to the transport/FW layers, disallowing
602  *	certain low power states
603  * @unref: release a reference previously taken with @ref. Note that
604  *	initially the reference count is 1, making an initial @unref
605  *	necessary to allow low power states.
606  * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
607  *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
608  *	Note that the transport must fill in the proper file headers.
609  */
610 struct iwl_trans_ops {
611 
612 	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
613 	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
614 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
615 			bool run_in_rfkill);
616 	int (*update_sf)(struct iwl_trans *trans,
617 			 struct iwl_sf_region *st_fwrd_space);
618 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
619 	void (*stop_device)(struct iwl_trans *trans, bool low_power);
620 
621 	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
622 	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
623 			 bool test, bool reset);
624 
625 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
626 
627 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
628 		  struct iwl_device_cmd *dev_cmd, int queue);
629 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
630 			struct sk_buff_head *skbs);
631 
632 	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
633 			   const struct iwl_trans_txq_scd_cfg *cfg,
634 			   unsigned int queue_wdg_timeout);
635 	void (*txq_disable)(struct iwl_trans *trans, int queue,
636 			    bool configure_scd);
637 
638 	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
639 	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
640 				 bool freeze);
641 	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
642 
643 	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
644 	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
645 	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
646 	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
647 	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
648 	int (*read_mem)(struct iwl_trans *trans, u32 addr,
649 			void *buf, int dwords);
650 	int (*write_mem)(struct iwl_trans *trans, u32 addr,
651 			 const void *buf, int dwords);
652 	void (*configure)(struct iwl_trans *trans,
653 			  const struct iwl_trans_config *trans_cfg);
654 	void (*set_pmi)(struct iwl_trans *trans, bool state);
655 	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
656 	void (*release_nic_access)(struct iwl_trans *trans,
657 				   unsigned long *flags);
658 	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
659 			      u32 value);
660 	void (*ref)(struct iwl_trans *trans);
661 	void (*unref)(struct iwl_trans *trans);
662 	int  (*suspend)(struct iwl_trans *trans);
663 	void (*resume)(struct iwl_trans *trans);
664 
665 	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
666 						 const struct iwl_fw_dbg_trigger_tlv
667 						 *trigger);
668 };
669 
670 /**
671  * enum iwl_trans_state - state of the transport layer
672  *
673  * @IWL_TRANS_NO_FW: no fw has sent an alive response
674  * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
675  */
676 enum iwl_trans_state {
677 	IWL_TRANS_NO_FW = 0,
678 	IWL_TRANS_FW_ALIVE	= 1,
679 };
680 
681 /**
682  * DOC: Platform power management
683  *
684  * There are two types of platform power management: system-wide
685  * (WoWLAN) and runtime.
686  *
687  * In system-wide power management the entire platform goes into a low
688  * power state (e.g. idle or suspend to RAM) at the same time and the
689  * device is configured as a wakeup source for the entire platform.
690  * This is usually triggered by userspace activity (e.g. the user
691  * presses the suspend button or a power management daemon decides to
692  * put the platform in low power mode).  The device's behavior in this
693  * mode is dictated by the wake-on-WLAN configuration.
694  *
695  * In runtime power management, only the devices which are themselves
696  * idle enter a low power state.  This is done at runtime, which means
697  * that the entire system is still running normally.  This mode is
698  * usually triggered automatically by the device driver and requires
699  * the ability to enter and exit the low power modes in a very short
700  * time, so there is not much impact in usability.
701  *
702  * The terms used for the device's behavior are as follows:
703  *
704  *	- D0: the device is fully powered and the host is awake;
705  *	- D3: the device is in low power mode and only reacts to
706  *		specific events (e.g. magic-packet received or scan
707  *		results found);
708  *	- D0I3: the device is in low power mode and reacts to any
709  *		activity (e.g. RX);
710  *
711  * These terms reflect the power modes in the firmware and are not to
712  * be confused with the physical device power state.  The NIC can be
713  * in D0I3 mode even if, for instance, the PCI device is in D3 state.
714  */
715 
716 /**
717  * enum iwl_plat_pm_mode - platform power management mode
718  *
719  * This enumeration describes the device's platform power management
720  * behavior when in idle mode (i.e. runtime power management) or when
721  * in system-wide suspend (i.e WoWLAN).
722  *
723  * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
724  *	device.  At runtime, this means that nothing happens and the
725  *	device always remains in active.  In system-wide suspend mode,
726  *	it means that the all connections will be closed automatically
727  *	by mac80211 before the platform is suspended.
728  * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
729  *	For runtime power management, this mode is not officially
730  *	supported.
731  * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
732  */
733 enum iwl_plat_pm_mode {
734 	IWL_PLAT_PM_MODE_DISABLED,
735 	IWL_PLAT_PM_MODE_D3,
736 	IWL_PLAT_PM_MODE_D0I3,
737 };
738 
739 /* Max time to wait for trans to become idle/non-idle on d0i3
740  * enter/exit (in msecs).
741  */
742 #define IWL_TRANS_IDLE_TIMEOUT 2000
743 
744 /**
745  * struct iwl_trans - transport common data
746  *
747  * @ops - pointer to iwl_trans_ops
748  * @op_mode - pointer to the op_mode
749  * @cfg - pointer to the configuration
750  * @status: a bit-mask of transport status flags
751  * @dev - pointer to struct device * that represents the device
752  * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
753  *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
754  * @hw_id: a u32 with the ID of the device / sub-device.
755  *	Set during transport allocation.
756  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
757  * @pm_support: set to true in start_hw if link pm is supported
758  * @ltr_enabled: set to true if the LTR is enabled
759  * @num_rx_queues: number of RX queues allocated by the transport;
760  *	the transport must set this before calling iwl_drv_start()
761  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
762  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
763  * @dev_cmd_headroom: room needed for the transport's private use before the
764  *	device_cmd for Tx - for internal use only
765  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
766  * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
767  *	starting the firmware, used for tracing
768  * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
769  *	start of the 802.11 header in the @rx_mpdu_cmd
770  * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
771  * @dbg_dest_tlv: points to the destination TLV for debug
772  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
773  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
774  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
775  * @paging_req_addr: The location were the FW will upload / download the pages
776  *	from. The address is set by the opmode
777  * @paging_db: Pointer to the opmode paging data base, the pointer is set by
778  *	the opmode.
779  * @paging_download_buf: Buffer used for copying all of the pages before
780  *	downloading them to the FW. The buffer is allocated in the opmode
781  * @system_pm_mode: the system-wide power management mode in use.
782  *	This mode is set dynamically, depending on the WoWLAN values
783  *	configured from the userspace at runtime.
784  * @runtime_pm_mode: the runtime power management mode in use.  This
785  *	mode is set during the initialization phase and is not
786  *	supposed to change during runtime.
787  */
788 struct iwl_trans {
789 	const struct iwl_trans_ops *ops;
790 	struct iwl_op_mode *op_mode;
791 	const struct iwl_cfg *cfg;
792 	enum iwl_trans_state state;
793 	unsigned long status;
794 
795 	struct device *dev;
796 	u32 max_skb_frags;
797 	u32 hw_rev;
798 	u32 hw_id;
799 	char hw_id_str[52];
800 
801 	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
802 
803 	bool pm_support;
804 	bool ltr_enabled;
805 
806 	const struct iwl_hcmd_arr *command_groups;
807 	int command_groups_size;
808 
809 	u8 num_rx_queues;
810 
811 	/* The following fields are internal only */
812 	struct kmem_cache *dev_cmd_pool;
813 	size_t dev_cmd_headroom;
814 	char dev_cmd_pool_name[50];
815 
816 	struct dentry *dbgfs_dir;
817 
818 #ifdef CONFIG_LOCKDEP
819 	struct lockdep_map sync_cmd_lockdep_map;
820 #endif
821 
822 	u64 dflt_pwr_limit;
823 
824 	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
825 	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
826 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
827 	u8 dbg_dest_reg_num;
828 
829 	/*
830 	 * Paging parameters - All of the parameters should be set by the
831 	 * opmode when paging is enabled
832 	 */
833 	u32 paging_req_addr;
834 	struct iwl_fw_paging *paging_db;
835 	void *paging_download_buf;
836 
837 	enum iwl_plat_pm_mode system_pm_mode;
838 	enum iwl_plat_pm_mode runtime_pm_mode;
839 	bool suspending;
840 
841 	/* pointer to trans specific struct */
842 	/*Ensure that this pointer will always be aligned to sizeof pointer */
843 	char trans_specific[0] __aligned(sizeof(void *));
844 };
845 
846 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
847 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
848 
849 static inline void iwl_trans_configure(struct iwl_trans *trans,
850 				       const struct iwl_trans_config *trans_cfg)
851 {
852 	trans->op_mode = trans_cfg->op_mode;
853 
854 	trans->ops->configure(trans, trans_cfg);
855 	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
856 }
857 
858 static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
859 {
860 	might_sleep();
861 
862 	return trans->ops->start_hw(trans, low_power);
863 }
864 
865 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
866 {
867 	return trans->ops->start_hw(trans, true);
868 }
869 
870 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
871 {
872 	might_sleep();
873 
874 	if (trans->ops->op_mode_leave)
875 		trans->ops->op_mode_leave(trans);
876 
877 	trans->op_mode = NULL;
878 
879 	trans->state = IWL_TRANS_NO_FW;
880 }
881 
882 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
883 {
884 	might_sleep();
885 
886 	trans->state = IWL_TRANS_FW_ALIVE;
887 
888 	trans->ops->fw_alive(trans, scd_addr);
889 }
890 
891 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
892 				     const struct fw_img *fw,
893 				     bool run_in_rfkill)
894 {
895 	might_sleep();
896 
897 	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
898 
899 	clear_bit(STATUS_FW_ERROR, &trans->status);
900 	return trans->ops->start_fw(trans, fw, run_in_rfkill);
901 }
902 
903 static inline int iwl_trans_update_sf(struct iwl_trans *trans,
904 				      struct iwl_sf_region *st_fwrd_space)
905 {
906 	might_sleep();
907 
908 	if (trans->ops->update_sf)
909 		return trans->ops->update_sf(trans, st_fwrd_space);
910 
911 	return 0;
912 }
913 
914 static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
915 					  bool low_power)
916 {
917 	might_sleep();
918 
919 	trans->ops->stop_device(trans, low_power);
920 
921 	trans->state = IWL_TRANS_NO_FW;
922 }
923 
924 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
925 {
926 	_iwl_trans_stop_device(trans, true);
927 }
928 
929 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
930 					bool reset)
931 {
932 	might_sleep();
933 	if (trans->ops->d3_suspend)
934 		trans->ops->d3_suspend(trans, test, reset);
935 }
936 
937 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
938 				      enum iwl_d3_status *status,
939 				      bool test, bool reset)
940 {
941 	might_sleep();
942 	if (!trans->ops->d3_resume)
943 		return 0;
944 
945 	return trans->ops->d3_resume(trans, status, test, reset);
946 }
947 
948 static inline void iwl_trans_ref(struct iwl_trans *trans)
949 {
950 	if (trans->ops->ref)
951 		trans->ops->ref(trans);
952 }
953 
954 static inline void iwl_trans_unref(struct iwl_trans *trans)
955 {
956 	if (trans->ops->unref)
957 		trans->ops->unref(trans);
958 }
959 
960 static inline int iwl_trans_suspend(struct iwl_trans *trans)
961 {
962 	if (!trans->ops->suspend)
963 		return 0;
964 
965 	return trans->ops->suspend(trans);
966 }
967 
968 static inline void iwl_trans_resume(struct iwl_trans *trans)
969 {
970 	if (trans->ops->resume)
971 		trans->ops->resume(trans);
972 }
973 
974 static inline struct iwl_trans_dump_data *
975 iwl_trans_dump_data(struct iwl_trans *trans,
976 		    const struct iwl_fw_dbg_trigger_tlv *trigger)
977 {
978 	if (!trans->ops->dump_data)
979 		return NULL;
980 	return trans->ops->dump_data(trans, trigger);
981 }
982 
983 static inline struct iwl_device_cmd *
984 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
985 {
986 	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
987 
988 	if (unlikely(dev_cmd_ptr == NULL))
989 		return NULL;
990 
991 	return (struct iwl_device_cmd *)
992 			(dev_cmd_ptr + trans->dev_cmd_headroom);
993 }
994 
995 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
996 
997 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
998 					 struct iwl_device_cmd *dev_cmd)
999 {
1000 	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
1001 
1002 	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
1003 }
1004 
1005 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
1006 			       struct iwl_device_cmd *dev_cmd, int queue)
1007 {
1008 	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
1009 		return -EIO;
1010 
1011 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1012 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1013 		return -EIO;
1014 	}
1015 
1016 	return trans->ops->tx(trans, skb, dev_cmd, queue);
1017 }
1018 
1019 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
1020 				     int ssn, struct sk_buff_head *skbs)
1021 {
1022 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1023 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1024 		return;
1025 	}
1026 
1027 	trans->ops->reclaim(trans, queue, ssn, skbs);
1028 }
1029 
1030 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
1031 					 bool configure_scd)
1032 {
1033 	trans->ops->txq_disable(trans, queue, configure_scd);
1034 }
1035 
1036 static inline void
1037 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
1038 			 const struct iwl_trans_txq_scd_cfg *cfg,
1039 			 unsigned int queue_wdg_timeout)
1040 {
1041 	might_sleep();
1042 
1043 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1044 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1045 		return;
1046 	}
1047 
1048 	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
1049 }
1050 
1051 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1052 					int fifo, int sta_id, int tid,
1053 					int frame_limit, u16 ssn,
1054 					unsigned int queue_wdg_timeout)
1055 {
1056 	struct iwl_trans_txq_scd_cfg cfg = {
1057 		.fifo = fifo,
1058 		.sta_id = sta_id,
1059 		.tid = tid,
1060 		.frame_limit = frame_limit,
1061 		.aggregate = sta_id >= 0,
1062 	};
1063 
1064 	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1065 }
1066 
1067 static inline
1068 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1069 			     unsigned int queue_wdg_timeout)
1070 {
1071 	struct iwl_trans_txq_scd_cfg cfg = {
1072 		.fifo = fifo,
1073 		.sta_id = -1,
1074 		.tid = IWL_MAX_TID_COUNT,
1075 		.frame_limit = IWL_FRAME_LIMIT,
1076 		.aggregate = false,
1077 	};
1078 
1079 	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1080 }
1081 
1082 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1083 					      unsigned long txqs,
1084 					      bool freeze)
1085 {
1086 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1087 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1088 		return;
1089 	}
1090 
1091 	if (trans->ops->freeze_txq_timer)
1092 		trans->ops->freeze_txq_timer(trans, txqs, freeze);
1093 }
1094 
1095 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1096 					    bool block)
1097 {
1098 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1099 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1100 		return;
1101 	}
1102 
1103 	if (trans->ops->block_txq_ptrs)
1104 		trans->ops->block_txq_ptrs(trans, block);
1105 }
1106 
1107 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
1108 						u32 txqs)
1109 {
1110 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1111 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1112 		return -EIO;
1113 	}
1114 
1115 	return trans->ops->wait_tx_queue_empty(trans, txqs);
1116 }
1117 
1118 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1119 {
1120 	trans->ops->write8(trans, ofs, val);
1121 }
1122 
1123 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1124 {
1125 	trans->ops->write32(trans, ofs, val);
1126 }
1127 
1128 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1129 {
1130 	return trans->ops->read32(trans, ofs);
1131 }
1132 
1133 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1134 {
1135 	return trans->ops->read_prph(trans, ofs);
1136 }
1137 
1138 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1139 					u32 val)
1140 {
1141 	return trans->ops->write_prph(trans, ofs, val);
1142 }
1143 
1144 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1145 				     void *buf, int dwords)
1146 {
1147 	return trans->ops->read_mem(trans, addr, buf, dwords);
1148 }
1149 
1150 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
1151 	do {								      \
1152 		if (__builtin_constant_p(bufsize))			      \
1153 			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
1154 		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
1155 	} while (0)
1156 
1157 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1158 {
1159 	u32 value;
1160 
1161 	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1162 		return 0xa5a5a5a5;
1163 
1164 	return value;
1165 }
1166 
1167 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1168 				      const void *buf, int dwords)
1169 {
1170 	return trans->ops->write_mem(trans, addr, buf, dwords);
1171 }
1172 
1173 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1174 					u32 val)
1175 {
1176 	return iwl_trans_write_mem(trans, addr, &val, 1);
1177 }
1178 
1179 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1180 {
1181 	if (trans->ops->set_pmi)
1182 		trans->ops->set_pmi(trans, state);
1183 }
1184 
1185 static inline void
1186 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1187 {
1188 	trans->ops->set_bits_mask(trans, reg, mask, value);
1189 }
1190 
1191 #define iwl_trans_grab_nic_access(trans, flags)	\
1192 	__cond_lock(nic_access,				\
1193 		    likely((trans)->ops->grab_nic_access(trans, flags)))
1194 
1195 static inline void __releases(nic_access)
1196 iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
1197 {
1198 	trans->ops->release_nic_access(trans, flags);
1199 	__release(nic_access);
1200 }
1201 
1202 static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1203 {
1204 	if (WARN_ON_ONCE(!trans->op_mode))
1205 		return;
1206 
1207 	/* prevent double restarts due to the same erroneous FW */
1208 	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
1209 		iwl_op_mode_nic_error(trans->op_mode);
1210 }
1211 
1212 /*****************************************************
1213  * transport helper functions
1214  *****************************************************/
1215 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1216 				  struct device *dev,
1217 				  const struct iwl_cfg *cfg,
1218 				  const struct iwl_trans_ops *ops,
1219 				  size_t dev_cmd_headroom);
1220 void iwl_trans_free(struct iwl_trans *trans);
1221 
1222 /*****************************************************
1223 * driver (transport) register/unregister functions
1224 ******************************************************/
1225 int __must_check iwl_pci_register_driver(void);
1226 void iwl_pci_unregister_driver(void);
1227 
1228 #endif /* __iwl_trans_h__ */
1229