/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of this
 * kind, but only mechanisms to make the HW do something. It is not completely
 * stateless, but close to it.
 * There is one implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *      1) A helper function is called during the module initialization and
 *         registers the bus driver's ops with the transport's alloc function.
 *      2) The bus driver's probe function calls the transport layer's
 *         allocation function. This function is, of course, bus specific.
 *      3) The allocation function spawns the upper layer, which registers
 *         with mac80211.
 *
 *      4) At some point (i.e. mac80211's start call), the op_mode calls
 *         the following sequence:
 *         start_hw
 *         start_fw
 *
 *      5) Then, when finished (or on reset):
 *         stop_device
 *
 *      6) Eventually, the free function is called.
 */

#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF      /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID  0x55550000
#define FH_RSCSR_FRAME_ALIGN    0x40
#define FH_RSCSR_RPA_EN         BIT(25)
#define FH_RSCSR_RXQ_POS        16
#define FH_RSCSR_RXQ_MASK       0x3F0000

struct iwl_rx_packet {
        /*
         * The first 4 bytes of the RX frame header contain both the RX frame
         * size and some flags.
         * Bit fields:
         * 31:    flag flush RB request
         * 30:    flag ignore TC (terminal counter) request
         * 29:    flag fast IRQ request
         * 28-26: Reserved
         * 25:    Offload enabled
         * 24:    RPF enabled
         * 23:    RSS enabled
         * 22:    Checksum enabled
         * 21-16: RX queue
         * 15-14: Reserved
         * 13-00: RX frame size
         */
        __le32 len_n_flags;
        struct iwl_cmd_header hdr;
        u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
        return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
        return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
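
/*
 * Usage sketch (illustrative, not part of the API): an RX handler typically
 * maps the buffer to a packet with rxb_addr() (defined below) and uses the
 * helpers above to validate the payload size before parsing a notification.
 * "struct iwl_my_notif" is a hypothetical notification layout, used here
 * only for illustration:
 *
 *      struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *      const struct iwl_my_notif *notif;
 *
 *      if (WARN_ON(iwl_rx_packet_payload_len(pkt) <
 *                  sizeof(struct iwl_my_notif)))
 *              return;
 *      notif = (const void *)pkt->data;
 */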

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *      the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: The command is allowed to be sent even while RF-kill
 *      is asserted.
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *      command queue, but after other high priority commands. Valid only
 *      with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *      (i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *      called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
        CMD_SEND_IN_RFKILL      = BIT(2),
        CMD_HIGH_PRIO           = BIT(3),
        CMD_SEND_IN_IDLE        = BIT(4),
        CMD_MAKE_TRANS_IDLE     = BIT(5),
        CMD_WAKE_UP_TRANS       = BIT(6),
        CMD_WANT_ASYNC_CALLBACK = BIT(7),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
        union {
                struct {
                        struct iwl_cmd_header hdr;      /* uCode API */
                        u8 payload[DEF_CMD_PAYLOAD_SIZE];
                };
                struct {
                        struct iwl_cmd_header_wide hdr_wide;
                        u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
                                        sizeof(struct iwl_cmd_header_wide) +
                                        sizeof(struct iwl_cmd_header)];
                };
        };
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *      ring. The transport layer doesn't map the command's buffer to DMA, but
 *      rather copies it to a previously allocated DMA buffer. This flag tells
 *      the transport layer not to copy the command, but to map the existing
 *      buffer (that is passed in) instead. This saves the memcpy and allows
 *      commands that are bigger than the fixed buffer to be submitted.
 *      Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *      chunk internally and free it again after the command completes. This
 *      can (currently) be used only once per command.
 *      Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
        IWL_HCMD_DFL_NOCOPY     = BIT(0),
        IWL_HCMD_DFL_DUP        = BIT(1),
};
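
/*
 * Usage sketch (illustrative, not part of the API): a chunk that is too big
 * for the fixed copy buffer can be mapped directly with NOCOPY, while the
 * first (small) chunk is still copied. &struct iwl_host_cmd is defined just
 * below; "cmd_hdr", "big_buf" and "MY_BIG_CMD" are hypothetical names:
 *
 *      struct iwl_host_cmd hcmd = {
 *              .id = MY_BIG_CMD,
 *              .len = { sizeof(cmd_hdr), big_buf_len },
 *              .data = { &cmd_hdr, big_buf },
 *              .dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *      };
 */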

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *      version and group as well
 */
struct iwl_host_cmd {
        const void *data[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u32 _rx_page_order;

        u32 flags;
        u32 id;
        u16 len[IWL_MAX_CMD_TBS_PER_TFD];
        u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
        free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
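
/*
 * Usage sketch (illustrative, not part of the API): a synchronous command
 * that needs the response buffer sets %CMD_WANT_SKB and must free the
 * response with iwl_free_resp() when done. iwl_trans_send_cmd() is declared
 * later in this file; "ECHO_CMD" is used as a placeholder command ID:
 *
 *      struct iwl_host_cmd cmd = {
 *              .id = ECHO_CMD,
 *              .flags = CMD_WANT_SKB,
 *      };
 *      int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *      if (!ret) {
 *              ... parse cmd.resp_pkt ...
 *              iwl_free_resp(&cmd);
 *      }
 */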

struct iwl_rx_cmd_buffer {
        struct page *_page;
        int _offset;
        bool _page_stolen;
        u32 _rx_page_order;
        unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
        return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
        return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
        r->_page_stolen = true;
        get_page(r->_page);
        return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
        __free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS     6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES       32
#define IWL_MAX_TVQM_QUEUES     512

#define IWL_MAX_TID_COUNT       8
#define IWL_MGMT_TID            15
#define IWL_FRAME_LIMIT         64
#define IWL_MAX_RX_HW_QUEUES    16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
        IWL_D3_STATUS_ALIVE,
        IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *      are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
        STATUS_DEVICE_ENABLED,
        STATUS_TPOWER_PMI,
        STATUS_INT_ENABLED,
        STATUS_RFKILL_HW,
        STATUS_RFKILL_OPMODE,
        STATUS_FW_ERROR,
        STATUS_TRANS_GOING_IDLE,
        STATUS_TRANS_IDLE,
        STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
        switch (rb_size) {
        case IWL_AMSDU_4K:
                return get_order(4 * 1024);
        case IWL_AMSDU_8K:
                return get_order(8 * 1024);
        case IWL_AMSDU_12K:
                return get_order(12 * 1024);
        default:
                WARN_ON(1);
                return -1;
        }
}

struct iwl_hcmd_names {
        u8 cmd_id;
        const char *const cmd_name;
};

#define HCMD_NAME(x)    \
        { .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
        const struct iwl_hcmd_names *arr;
        int size;
};

#define HCMD_ARR(x)     \
        { .arr = x, .size = ARRAY_SIZE(x) }
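
/*
 * Usage sketch (illustrative, not part of the API): op_modes build
 * command-name tables for debugging with these helpers; each per-group
 * array is expected to be sorted by command ID (see
 * iwl_cmd_groups_verify_sorted() later in this file). The command IDs
 * and array names here are hypothetical:
 *
 *      static const struct iwl_hcmd_names my_legacy_names[] = {
 *              HCMD_NAME(UCODE_ALIVE_NTFY),
 *              HCMD_NAME(ECHO_CMD),
 *      };
 *
 *      static const struct iwl_hcmd_arr my_command_groups[] = {
 *              [0x0] = HCMD_ARR(my_legacy_names),
 *      };
 */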

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *      Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *      SEQ_RX_FRAME bit on some notifications, this is the
 *      list of such notifications to filter. Max length is
 *      %MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *      if unset, 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *      in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *      commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 *      we get the ALIVE from the uCode
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *      space for at least two pointers
 */
struct iwl_trans_config {
        struct iwl_op_mode *op_mode;

        u8 cmd_queue;
        u8 cmd_fifo;
        unsigned int cmd_q_wdg_timeout;
        const u8 *no_reclaim_cmds;
        unsigned int n_no_reclaim_cmds;

        enum iwl_amsdu_size rx_buf_size;
        bool bc_table_dword;
        bool scd_set_active;
        bool sw_csum_tx;
        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;

        u32 sdio_adma_addr;

        u8 cb_data_offs;
};

struct iwl_trans_dump_data {
        u32 len;
        u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
        u8 fifo;
        u8 sta_id;
        u8 tid;
        bool aggregate;
        int frame_limit;
};
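
/*
 * Usage sketch (illustrative, not part of the API): the op_mode fills a
 * &struct iwl_trans_config during its start and hands it to
 * iwl_trans_configure() (defined later in this file). The values below
 * are hypothetical:
 *
 *      struct iwl_trans_config trans_cfg = {
 *              .op_mode = op_mode,
 *              .cmd_queue = 9,
 *              .cmd_fifo = 7,
 *              .rx_buf_size = IWL_AMSDU_4K,
 *              .command_groups = my_command_groups,
 *              .command_groups_size = ARRAY_SIZE(my_command_groups),
 *      };
 *
 *      iwl_trans_configure(trans, &trans_cfg);
 */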

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 *      out of a low power state. From that point on, the HW can send
 *      interrupts. May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *      May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *      layer. Also kicks a fw image.
 *      May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *      the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *      May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *      the HW. If low_power is true, the NIC will be put in low power state.
 *      From that point on, the HW will be stopped but will still issue an
 *      interrupt if the HW RF kill switch is triggered.
 *      This callback must do the right thing and not crash even if %start_hw()
 *      was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *      suspend. This is optional, if not implemented WoWLAN will not be
 *      supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *      talk to the WoWLAN image to get its status. This is optional, if not
 *      implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *      If RFkill is asserted in the middle of a SYNC host command, it must
 *      return -ERFKILL straight away.
 *      May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *      ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *      the CSUM will be taken care of (TCP CSUM and IP header in case of
 *      IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *      header if it is IPv4.
 *      Must be atomic
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *      Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *      iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *      this one. The op_mode must not configure the HCMD queue. The scheduler
 *      configuration may be %NULL, in which case the hardware will not be
 *      configured. If true is returned, the operation mode needs to increment
 *      the sequence number of the packets routed to this queue because of a
 *      hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *      Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *      queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *      that the transport needs to refcount the calls since this function
 *      will be called several times with block = true, and then the queues
 *      need to be unblocked only after the same number of calls with
 *      block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *      will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *      the op_mode. May be called several times before start_fw, can't be
 *      called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *      Sleeping is not allowed between grab_nic_access and
 *      release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *      must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @ref: grab a reference to the transport/FW layers, disallowing
 *      certain low power states
 * @unref: release a reference previously taken with @ref. Note that
 *      initially the reference count is 1, making an initial @unref
 *      necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *      TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *      Note that the transport must fill in the proper file headers.
 */
struct iwl_trans_ops {

        int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
        void (*op_mode_leave)(struct iwl_trans *iwl_trans);
        int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
                        bool run_in_rfkill);
        int (*update_sf)(struct iwl_trans *trans,
                         struct iwl_sf_region *st_fwrd_space);
        void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
        void (*stop_device)(struct iwl_trans *trans, bool low_power);

        void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
        int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
                         bool test, bool reset);

        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

        int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
                  struct iwl_device_cmd *dev_cmd, int queue);
        void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
                        struct sk_buff_head *skbs);

        bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
                           const struct iwl_trans_txq_scd_cfg *cfg,
                           unsigned int queue_wdg_timeout);
        void (*txq_disable)(struct iwl_trans *trans, int queue,
                            bool configure_scd);
        /* a000 functions */
        int (*txq_alloc)(struct iwl_trans *trans,
                         struct iwl_tx_queue_cfg_cmd *cmd,
                         int cmd_id,
                         unsigned int queue_wdg_timeout);
        void (*txq_free)(struct iwl_trans *trans, int queue);

        void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
                                    bool shared);

        int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
        int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
        void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
                                 bool freeze);
        void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

        void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
        void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
        u32 (*read32)(struct iwl_trans *trans, u32 ofs);
        u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
        void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
        int (*read_mem)(struct iwl_trans *trans, u32 addr,
                        void *buf, int dwords);
        int (*write_mem)(struct iwl_trans *trans, u32 addr,
                         const void *buf, int dwords);
        void (*configure)(struct iwl_trans *trans,
                          const struct iwl_trans_config *trans_cfg);
        void (*set_pmi)(struct iwl_trans *trans, bool state);
        bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
        void (*release_nic_access)(struct iwl_trans *trans,
                                   unsigned long *flags);
        void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
                              u32 value);
        void (*ref)(struct iwl_trans *trans);
        void (*unref)(struct iwl_trans *trans);
        int (*suspend)(struct iwl_trans *trans);
        void (*resume)(struct iwl_trans *trans);

        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 const struct iwl_fw_dbg_trigger_tlv
                                                 *trigger);
};
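
/*
 * Implementation sketch (illustrative, not part of the API): each bus
 * back-end provides one of these tables and passes it to iwl_trans_alloc()
 * (declared at the end of this file). Only two handlers are shown and the
 * names are hypothetical; a real table must implement all handlers:
 *
 *      static const struct iwl_trans_ops my_bus_trans_ops = {
 *              .start_hw = my_bus_start_hw,
 *              .send_cmd = my_bus_send_cmd,
 *              ... all remaining handlers ...
 *      };
 */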

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
        IWL_TRANS_NO_FW = 0,
        IWL_TRANS_FW_ALIVE = 1,
};

/**
 * DOC: Platform power management
 *
 * There are two types of platform power management: system-wide
 * (WoWLAN) and runtime.
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * In runtime power management, only the devices which are themselves
 * idle enter a low power state. This is done at runtime, which means
 * that the entire system is still running normally. This mode is
 * usually triggered automatically by the device driver and requires
 * the ability to enter and exit the low power modes in a very short
 * time, so there is not much impact on usability.
 *
 * The terms used for the device's behavior are as follows:
 *
 * - D0: the device is fully powered and the host is awake;
 * - D3: the device is in low power mode and only reacts to
 *      specific events (e.g. magic-packet received or scan
 *      results found);
 * - D0I3: the device is in low power mode and reacts to any
 *      activity (e.g. RX);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state. The NIC can be
 * in D0I3 mode even if, for instance, the PCI device is in D3 state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in idle mode (i.e. runtime power management) or when
 * in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *      device. At runtime, this means that nothing happens and the
 *      device always remains active. In system-wide suspend mode,
 *      it means that all connections will be closed automatically
 *      by mac80211 before the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 *      For runtime power management, this mode is not officially
 *      supported.
 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
 */
enum iwl_plat_pm_mode {
        IWL_PLAT_PM_MODE_DISABLED,
        IWL_PLAT_PM_MODE_D3,
        IWL_PLAT_PM_MODE_D0I3,
};

/* Max time to wait for trans to become idle/non-idle on d0i3
 * enter/exit (in msecs).
 */
#define IWL_TRANS_IDLE_TIMEOUT 2000
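
/*
 * Usage sketch (illustrative, not part of the API): the op_mode's WoWLAN
 * path brackets the platform suspend with the d3 wrappers (defined later
 * in this file), then checks whether the firmware survived; the test/reset
 * argument values here are illustrative only:
 *
 *      enum iwl_d3_status d3_status;
 *      int ret;
 *
 *      iwl_trans_d3_suspend(trans, false, true);
 *      ... platform is suspended and later resumed ...
 *      ret = iwl_trans_d3_resume(trans, &d3_status, false, true);
 *      if (ret || d3_status != IWL_D3_STATUS_ALIVE)
 *              ... device was reset, a full restart is needed ...
 */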

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *      0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *      Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *      the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *      The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *      starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *      start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @paging_req_addr: The location where the FW will upload / download the
 *      pages from. The address is set by the opmode.
 * @paging_db: Pointer to the opmode paging database, the pointer is set by
 *      the opmode.
 * @paging_download_buf: Buffer used for copying all of the pages before
 *      downloading them to the FW. The buffer is allocated in the opmode.
 * @system_pm_mode: the system-wide power management mode in use.
 *      This mode is set dynamically, depending on the WoWLAN values
 *      configured from the userspace at runtime.
 * @runtime_pm_mode: the runtime power management mode in use. This
 *      mode is set during the initialization phase and is not
 *      supposed to change during runtime.
 */
struct iwl_trans {
        const struct iwl_trans_ops *ops;
        struct iwl_op_mode *op_mode;
        const struct iwl_cfg *cfg;
        struct iwl_drv *drv;
        enum iwl_trans_state state;
        unsigned long status;

        struct device *dev;
        u32 max_skb_frags;
        u32 hw_rev;
        u32 hw_rf_id;
        u32 hw_id;
        char hw_id_str[52];

        u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

        bool pm_support;
        bool ltr_enabled;

        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;
        bool wide_cmd_header;

        u8 num_rx_queues;

        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
        char dev_cmd_pool_name[50];

        struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
        struct lockdep_map sync_cmd_lockdep_map;
#endif

        u64 dflt_pwr_limit;

        const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
        const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u8 dbg_dest_reg_num;

        /*
         * Paging parameters - All of the parameters should be set by the
         * opmode when paging is enabled
         */
        u32 paging_req_addr;
        struct iwl_fw_paging *paging_db;
        void *paging_download_buf;

        enum iwl_plat_pm_mode system_pm_mode;
        enum iwl_plat_pm_mode runtime_pm_mode;
        bool suspending;

        /* pointer to trans specific struct */
        /* Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
};
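
/*
 * Implementation sketch (illustrative, not part of the API): bus-specific
 * code keeps its private state in trans_specific and typically accesses it
 * through a cast helper; "struct iwl_trans_pcie" stands for that back-end's
 * private type and is not defined in this file:
 *
 *      #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
 *              ((struct iwl_trans_pcie *)((_iwl_trans)->trans_specific))
 */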

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
                                       const struct iwl_trans_config *trans_cfg)
{
        trans->op_mode = trans_cfg->op_mode;

        trans->ops->configure(trans, trans_cfg);
        WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
        might_sleep();

        return trans->ops->start_hw(trans, low_power);
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
        return trans->ops->start_hw(trans, true);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
        might_sleep();

        if (trans->ops->op_mode_leave)
                trans->ops->op_mode_leave(trans);

        trans->op_mode = NULL;

        trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
        might_sleep();

        trans->state = IWL_TRANS_FW_ALIVE;

        trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
                                     const struct fw_img *fw,
                                     bool run_in_rfkill)
{
        might_sleep();

        WARN_ON_ONCE(!trans->rx_mpdu_cmd);

        clear_bit(STATUS_FW_ERROR, &trans->status);
        return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline int iwl_trans_update_sf(struct iwl_trans *trans,
                                      struct iwl_sf_region *st_fwrd_space)
{
        might_sleep();

        if (trans->ops->update_sf)
                return trans->ops->update_sf(trans, st_fwrd_space);

        return 0;
}

static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
                                          bool low_power)
{
        might_sleep();

        trans->ops->stop_device(trans, low_power);

        trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
        _iwl_trans_stop_device(trans, true);
}
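
/*
 * Usage sketch (illustrative, not part of the API): the start/stop flow
 * described in "Life cycle of the transport layer" maps onto the wrappers
 * above roughly as follows; "fw_img" is whatever image the op_mode chose,
 * and trans->rx_mpdu_cmd must have been assigned beforehand:
 *
 *      ret = iwl_trans_start_hw(trans);
 *      if (ret)
 *              goto out;
 *      ret = iwl_trans_start_fw(trans, fw_img, false);
 *      ... wait for the ALIVE notification from the firmware, then ...
 *      iwl_trans_fw_alive(trans, scd_base_addr);
 *
 *      ... and on stop or reset:
 *      iwl_trans_stop_device(trans);
 */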

static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
                                        bool reset)
{
        might_sleep();
        if (trans->ops->d3_suspend)
                trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
                                      enum iwl_d3_status *status,
                                      bool test, bool reset)
{
        might_sleep();
        if (!trans->ops->d3_resume)
                return 0;

        return trans->ops->d3_resume(trans, status, test, reset);
}

static inline void iwl_trans_ref(struct iwl_trans *trans)
{
        if (trans->ops->ref)
                trans->ops->ref(trans);
}

static inline void iwl_trans_unref(struct iwl_trans *trans)
{
        if (trans->ops->unref)
                trans->ops->unref(trans);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
        if (!trans->ops->suspend)
                return 0;

        return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
        if (trans->ops->resume)
                trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
                    const struct iwl_fw_dbg_trigger_tlv *trigger)
{
        if (!trans->ops->dump_data)
                return NULL;
        return trans->ops->dump_data(trans, trigger);
}

static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
        return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
                                         struct iwl_device_cmd *dev_cmd)
{
        kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
                               struct iwl_device_cmd *dev_cmd, int queue)
{
        if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
                return -EIO;

        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return -EIO;
        }

        return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
                                     int ssn, struct sk_buff_head *skbs)
{
        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return;
        }

        trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
                                         bool configure_scd)
{
        trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
                         const struct iwl_trans_txq_scd_cfg *cfg,
                         unsigned int queue_wdg_timeout)
{
        might_sleep();

        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return false;
        }

        return trans->ops->txq_enable(trans, queue, ssn,
                                      cfg, queue_wdg_timeout);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
        if (WARN_ON_ONCE(!trans->ops->txq_free))
                return;

        trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
                    struct iwl_tx_queue_cfg_cmd *cmd,
                    int cmd_id,
                    unsigned int queue_wdg_timeout)
{
        might_sleep();

        if (WARN_ON_ONCE(!trans->ops->txq_alloc))
                return -ENOTSUPP;

        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return -EIO;
        }

        return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
}
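
/*
 * Usage sketch (illustrative, not part of the API): enabling a Tx queue
 * with an explicit scheduler configuration; the FIFO number and the other
 * values here are hypothetical:
 *
 *      struct iwl_trans_txq_scd_cfg cfg = {
 *              .fifo = 1,
 *              .sta_id = sta_id,
 *              .tid = tid,
 *              .frame_limit = IWL_FRAME_LIMIT,
 *              .aggregate = true,
 *      };
 *
 *      iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, wdg_timeout);
 */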

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
                                                 int queue, bool shared_mode)
{
        if (trans->ops->txq_set_shared_mode)
                trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
                                        int fifo, int sta_id, int tid,
                                        int frame_limit, u16 ssn,
                                        unsigned int queue_wdg_timeout)
{
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
                .sta_id = sta_id,
                .tid = tid,
                .frame_limit = frame_limit,
                .aggregate = sta_id >= 0,
        };

        iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
                             unsigned int queue_wdg_timeout)
{
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
                .sta_id = -1,
                .tid = IWL_MAX_TID_COUNT,
                .frame_limit = IWL_FRAME_LIMIT,
                .aggregate = false,
        };

        iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
                                              unsigned long txqs,
                                              bool freeze)
{
        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return;
        }

        if (trans->ops->freeze_txq_timer)
                trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
                                            bool block)
{
        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return;
        }

        if (trans->ops->block_txq_ptrs)
                trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
                                                 u32 txqs)
{
        if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
                return -ENOTSUPP;

        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return -EIO;
        }

        return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
        if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
                return -ENOTSUPP;

        if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return -EIO;
        }

        return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
        trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
        trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
        return trans->ops->read32(trans, ofs);
}
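
/*
 * Usage sketch (illustrative, not part of the API): periphery register
 * access outside the fast path is typically bracketed by the NIC-access
 * helpers defined further below, so the NIC is guaranteed to be awake for
 * the read; "MY_PRPH_REG" is a placeholder register offset:
 *
 *      unsigned long flags;
 *      u32 val = 0;
 *
 *      if (iwl_trans_grab_nic_access(trans, &flags)) {
 *              val = iwl_trans_read_prph(trans, MY_PRPH_REG);
 *              iwl_trans_release_nic_access(trans, &flags);
 *      }
 */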

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
        return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
                                        u32 val)
{
        trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
                                     void *buf, int dwords)
{
        return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)                   \
        do {                                                                  \
                if (__builtin_constant_p(bufsize))                           \
                        BUILD_BUG_ON((bufsize) % sizeof(u32));                \
                iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
        } while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
        u32 value;

        if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
                return 0xa5a5a5a5;

        return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
                                      const void *buf, int dwords)
{
        return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
                                        u32 val)
{
        return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
        if (trans->ops->set_pmi)
                trans->ops->set_pmi(trans, state);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
        trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags) \
        __cond_lock(nic_access,                 \
                    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
        trans->ops->release_nic_access(trans, flags);
        __release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
        if (WARN_ON_ONCE(!trans->op_mode))
                return;

        /* prevent double restarts due to the same erroneous FW */
        if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
                iwl_op_mode_nic_error(trans->op_mode);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
                                  const struct iwl_cfg *cfg,
                                  const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */