/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "iwl-fw.h"
#include "iwl-op-mode.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of this
 * kind, but only mechanisms to make the HW do something. It is not completely
 * stateless but close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation function.
 *	   Of course this function is bus specific.
 *	3) These allocation functions will spawn the upper layer, which will
 *	   register mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
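/*
 * Illustrative sketch (not part of the API): roughly how an op_mode drives
 * steps 4) and 5) above through the inline wrappers declared later in this
 * header. The firmware image lookup ("fw") and the error handling are
 * op_mode specific and only assumed here.
 *
 *	ret = iwl_trans_start_hw(trans);
 *	if (ret)
 *		return ret;
 *
 *	ret = iwl_trans_start_fw(trans, fw, false);
 *	if (ret) {
 *		iwl_trans_stop_device(trans);
 *		return ret;
 *	}
 *
 *	... later, on stop or reset ...
 *
 *	iwl_trans_stop_device(trans);
 */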
/**
 * DOC: Host command section
 *
 * A host command is a command issued by the upper layer to the fw. There are
 * several versions of fw that have several APIs. The transport layer is
 * completely agnostic to these differences.
 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode).
 */
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s)	((s) & 0xff)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_RX_FRAME	cpu_to_le16(0x8000)

/*
 * These functions retrieve specific information from
 * the id field in the iwl_host_cmd struct which contains
 * the command id, the group id and the version of the command
 * and vice versa
 */
static inline u8 iwl_cmd_opcode(u32 cmdid)
{
	return cmdid & 0xFF;
}

static inline u8 iwl_cmd_groupid(u32 cmdid)
{
	return ((cmdid & 0xFF00) >> 8);
}

static inline u8 iwl_cmd_version(u32 cmdid)
{
	return ((cmdid & 0xFF0000) >> 16);
}

static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
{
	return opcode + (groupid << 8) + (version << 16);
}

/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)

/* due to the conversion, this group is special; new groups
 * should be defined in the appropriate fw-api header files
 */
#define IWL_ALWAYS_LONG_GROUP	1

/**
 * struct iwl_cmd_header
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 */
struct iwl_cmd_header {
	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
	u8 group_id;
	/*
	 * The driver sets up the sequence number to values of its choosing.
	 * uCode does not use this value, but passes it back to the driver
	 * when sending the response to each driver-originated command, so
	 * the driver can match the response to the command.  Since the values
	 * don't get used by uCode, the driver may set up an arbitrary format.
	 *
	 * There is one exception:  uCode sets bit 15 when it originates
	 * the response/notification, i.e. when the response/notification
	 * is not a direct response to a command sent by the driver.  For
	 * example, uCode issues REPLY_RX when it sends a received frame
	 * to the driver; it is not a direct response to any driver command.
	 *
	 * The Linux driver uses the following format:
	 *
	 *  0:7		tfd index - position within TX queue
	 *  8:12	TX queue id
	 *  13:14	reserved
	 *  15		unsolicited RX or uCode-originated notification
	 */
	__le16 sequence;
} __packed;
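/*
 * Illustrative sketch (not part of the API): composing a wide command id with
 * the helpers above and decoding the driver-formatted sequence field of a
 * struct iwl_cmd_header. The opcode value and the "hdr" pointer are
 * assumptions made for the example.
 *
 *	u32 id = iwl_cmd_id(0x1c, IWL_ALWAYS_LONG_GROUP, 0);
 *
 *	... iwl_cmd_groupid(id) yields IWL_ALWAYS_LONG_GROUP again and
 *	    iwl_cmd_opcode(id) yields 0x1c ...
 *
 *	u16 seq = le16_to_cpu(hdr->sequence);
 *	int txq_id = SEQ_TO_QUEUE(seq);
 *	int index = SEQ_TO_INDEX(seq);
 */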
/**
 * struct iwl_cmd_header_wide
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 * This is the wide version that contains more information about the command
 * like length, version and command type.
 */
struct iwl_cmd_header_wide {
	u8 cmd;
	u8 group_id;
	__le16 sequence;
	__le16 length;
	u8 reserved;
	u8 version;
} __packed;

#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
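/*
 * Illustrative sketch (not part of the API): given a received packet (e.g.
 * obtained via rxb_addr() further below), the helpers above yield the length
 * of the notification payload that follows the command header. The
 * "struct iwl_my_notif" type and process_notif() are placeholders for this
 * example only.
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	u32 payload_len = iwl_rx_packet_payload_len(pkt);
 *
 *	if (payload_len >= sizeof(struct iwl_my_notif))
 *		process_notif((void *)pkt->data);
 */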
/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: The command should be sent even when RFkill is
 *	asserted (the transport otherwise rejects commands with -ERFKILL).
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *	command queue, but after other high priority commands. Valid only
 *	with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *	(i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
 *	check that we leave enough room for the TBs bitmap which needs 20 bits.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_HIGH_PRIO		= BIT(3),
	CMD_SEND_IN_IDLE	= BIT(4),
	CMD_MAKE_TRANS_IDLE	= BIT(5),
	CMD_WAKE_UP_TRANS	= BIT(6),
	CMD_WANT_ASYNC_CALLBACK	= BIT(7),

	CMD_TB_BITMAP_POS	= 11,
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
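/*
 * Illustrative sketch (not part of the API): building a synchronous host
 * command that wants the response buffer, sending it through
 * iwl_trans_send_cmd() (declared further below) and releasing the response.
 * The command opcode, the payload struct and the error handling are
 * assumptions made for the example only.
 *
 *	struct iwl_my_cmd payload = { ... };
 *	struct iwl_host_cmd hcmd = {
 *		.id = iwl_cmd_id(MY_OPCODE, IWL_ALWAYS_LONG_GROUP, 0),
 *		.flags = CMD_WANT_SKB,
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		... inspect hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */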
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

/*
 * The first entry in driver_data array in ieee80211_tx_info
 * that can be used by the transport.
 */
#define IWL_TRANS_FIRST_DRIVER_DATA 2
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES	32
#define IWL_MAX_TID_COUNT	8
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
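/*
 * Illustrative sketch (not part of the API): how an op_mode can build the
 * command-name tables handed to the transport through
 * &iwl_trans_config->command_groups for debugging. The command names used
 * here are placeholders; each inner array must be sorted by command id, as
 * checked by iwl_cmd_groups_verify_sorted() further below.
 *
 *	static const struct iwl_hcmd_names my_legacy_names[] = {
 *		HCMD_NAME(MY_CMD_A),
 *		HCMD_NAME(MY_CMD_B),
 *	};
 *
 *	static const struct iwl_hcmd_arr my_command_groups[] = {
 *		[0x0] = HCMD_ARR(my_legacy_names),
 *	};
 */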
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: firmware supports wide host command header
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 *	we get the ALIVE from the uCode
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u32 sdio_adma_addr;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};
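/*
 * Illustrative sketch (not part of the API): an op_mode typically fills a
 * struct iwl_trans_config on its stack and hands it to iwl_trans_configure()
 * (defined below) before starting the firmware. The concrete values and the
 * "no_reclaim_cmds"/"my_command_groups" arrays are placeholders for the
 * example.
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 9,
 *		.cmd_fifo = 7,
 *		.no_reclaim_cmds = no_reclaim_cmds,
 *		.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = my_command_groups,
 *		.command_groups_size = ARRAY_SIZE(my_command_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */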
/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 *	out of a low power state. From that point on, the HW can send
 *	interrupts. May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. If low_power is true, the NIC will be put in low power state.
 *	From that point on, the HW will be stopped but will still issue an
 *	interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if start_hw()
 *	was called but not start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @ref: grab a reference to the transport/FW layers, disallowing
 *	certain low power states
 * @unref: release a reference previously taken with @ref. Note that
 *	initially the reference count is 1, making an initial @unref
 *	necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	int (*update_sf)(struct iwl_trans *trans,
			 struct iwl_sf_region *st_fwrd_space);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 const struct iwl_fw_dbg_trigger_tlv
						 *trigger);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
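/*
 * Illustrative sketch (not part of the API): a bus specific implementation
 * provides one static struct iwl_trans_ops instance whose handlers match the
 * prototypes above; the names below are hypothetical.
 *
 *	static const struct iwl_trans_ops trans_ops_my_bus = {
 *		.start_hw = iwl_trans_my_bus_start_hw,
 *		.start_fw = iwl_trans_my_bus_start_fw,
 *		.stop_device = iwl_trans_my_bus_stop_device,
 *		.send_cmd = iwl_trans_my_bus_send_cmd,
 *		.tx = iwl_trans_my_bus_tx,
 *		...
 *	};
 *
 * The instance is then passed to iwl_trans_alloc() (declared at the end of
 * this file) from the bus probe path.
 */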
/**
 * DOC: Platform power management
 *
 * There are two types of platform power management: system-wide
 * (WoWLAN) and runtime.
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * In runtime power management, only the devices which are themselves
 * idle enter a low power state. This is done at runtime, which means
 * that the entire system is still running normally. This mode is
 * usually triggered automatically by the device driver and requires
 * the ability to enter and exit the low power modes in a very short
 * time, so there is not much impact in usability.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *	- D0I3: the device is in low power mode and reacts to any
 *		activity (e.g. RX);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state. The NIC can be
 * in D0I3 mode even if, for instance, the PCI device is in D3 state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in idle mode (i.e. runtime power management) or when
 * in system-wide suspend (i.e WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. At runtime, this means that nothing happens and the
 *	device always remains active. In system-wide suspend mode,
 *	it means that all the connections will be closed automatically
 *	by mac80211 before the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 *	For runtime power management, this mode is not officially
 *	supported.
 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
	IWL_PLAT_PM_MODE_D0I3,
};

/* Max time to wait for trans to become idle/non-idle on d0i3
 * enter/exit (in msecs).
 */
#define IWL_TRANS_IDLE_TIMEOUT 2000
/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @cfg: pointer to the configuration
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_headroom: room needed for the transport's private use before the
 *	device_cmd for Tx - for internal use only
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @paging_req_addr: The location where the FW will upload / download the pages
 *	from. The address is set by the opmode
 * @paging_db: Pointer to the opmode paging data base, the pointer is set by
 *	the opmode.
 * @paging_download_buf: Buffer used for copying all of the pages before
 *	downloading them to the FW. The buffer is allocated in the opmode
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @runtime_pm_mode: the runtime power management mode in use. This
 *	mode is set during the initialization phase and is not
 *	supposed to change during runtime.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 num_rx_queues;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	size_t dev_cmd_headroom;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	u64 dflt_pwr_limit;

	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_dest_reg_num;

	/*
	 * Paging parameters - All of the parameters should be set by the
	 * opmode when paging is enabled
	 */
	u32 paging_req_addr;
	struct iwl_fw_paging *paging_db;
	void *paging_download_buf;

	enum iwl_plat_pm_mode system_pm_mode;
	enum iwl_plat_pm_mode runtime_pm_mode;
	bool suspending;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	return trans->ops->start_hw(trans, true);
}
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline int iwl_trans_update_sf(struct iwl_trans *trans,
				      struct iwl_sf_region *st_fwrd_space)
{
	might_sleep();

	if (trans->ops->update_sf)
		return trans->ops->update_sf(trans, st_fwrd_space);

	return 0;
}

static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}

static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
					bool reset)
{
	might_sleep();
	if (trans->ops->d3_suspend)
		trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline void iwl_trans_ref(struct iwl_trans *trans)
{
	if (trans->ops->ref)
		trans->ops->ref(trans);
}

static inline void iwl_trans_unref(struct iwl_trans *trans)
{
	if (trans->ops->unref)
		trans->ops->unref(trans);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
		    const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, trigger);
}

static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);

	if (unlikely(dev_cmd_ptr == NULL))
		return NULL;

	return (struct iwl_device_cmd *)
			(dev_cmd_ptr + trans->dev_cmd_headroom);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;

	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
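/*
 * Illustrative sketch (not part of the API): the usual Tx path pairs
 * iwl_trans_alloc_tx_cmd() with iwl_trans_tx(), returning the command to the
 * pool if the frame could not be handed to the transport. Filling the device
 * command and picking "txq_id" are op_mode specific and only assumed here.
 *
 *	struct iwl_device_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *
 *	... fill dev_cmd->hdr and dev_cmd->payload for the frame ...
 *
 *	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id)) {
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		return -EIO;
 *	}
 */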
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline void
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
						u32 txqs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queue_empty(trans, txqs);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
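/*
 * Illustrative sketch (not part of the API): register accesses that need the
 * NIC awake are bracketed by the grab/release pair above; the same "flags"
 * storage must be passed to both calls. The register offset is a placeholder
 * for the example.
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		u32 val = iwl_trans_read_prph(trans, MY_PRPH_REG);
 *
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */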
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops,
				  size_t dev_cmd_headroom);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */