1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24 * USA 25 * 26 * The full GNU General Public License is included in this distribution 27 * in the file called COPYING. 28 * 29 * Contact Information: 30 * Intel Linux Wireless <linuxwifi@intel.com> 31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32 * 33 * BSD LICENSE 34 * 35 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2016 Intel Deutschland GmbH 38 * All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 44 * * Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * * Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in 48 * the documentation and/or other materials provided with the 49 * distribution. 50 * * Neither the name Intel Corporation nor the names of its 51 * contributors may be used to endorse or promote products derived 52 * from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 *****************************************************************************/ 67 #ifndef __iwl_trans_h__ 68 #define __iwl_trans_h__ 69 70 #include <linux/ieee80211.h> 71 #include <linux/mm.h> /* for page_address */ 72 #include <linux/lockdep.h> 73 #include <linux/kernel.h> 74 75 #include "iwl-debug.h" 76 #include "iwl-config.h" 77 #include "iwl-fw.h" 78 #include "iwl-op-mode.h" 79 80 /** 81 * DOC: Transport layer - what is it ? 82 * 83 * The transport layer is the layer that deals with the HW directly. It provides 84 * an abstraction of the underlying HW to the upper layer. The transport layer 85 * doesn't provide any policy, algorithm or anything of this kind, but only 86 * mechanisms to make the HW do something. It is not completely stateless but 87 * close to it. 88 * We will have an implementation for each different supported bus. 89 */ 90 91 /** 92 * DOC: Life cycle of the transport layer 93 * 94 * The transport layer has a very precise life cycle. 95 * 96 * 1) A helper function is called during the module initialization and 97 * registers the bus driver's ops with the transport's alloc function. 98 * 2) Bus's probe calls to the transport layer's allocation functions. 99 * Of course this function is bus specific. 100 * 3) This allocation functions will spawn the upper layer which will 101 * register mac80211. 102 * 103 * 4) At some point (i.e. mac80211's start call), the op_mode will call 104 * the following sequence: 105 * start_hw 106 * start_fw 107 * 108 * 5) Then when finished (or reset): 109 * stop_device 110 * 111 * 6) Eventually, the free function will be called. 112 */ 113 114 /** 115 * DOC: Host command section 116 * 117 * A host command is a command issued by the upper layer to the fw. There are 118 * several versions of fw that have several APIs. The transport layer is 119 * completely agnostic to these differences. 120 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode), 121 */ 122 #define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) 123 #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 124 #define SEQ_TO_INDEX(s) ((s) & 0xff) 125 #define INDEX_TO_SEQ(i) ((i) & 0xff) 126 #define SEQ_RX_FRAME cpu_to_le16(0x8000) 127 128 /* 129 * those functions retrieve specific information from 130 * the id field in the iwl_host_cmd struct which contains 131 * the command id, the group id and the version of the command 132 * and vice versa 133 */ 134 static inline u8 iwl_cmd_opcode(u32 cmdid) 135 { 136 return cmdid & 0xFF; 137 } 138 139 static inline u8 iwl_cmd_groupid(u32 cmdid) 140 { 141 return ((cmdid & 0xFF00) >> 8); 142 } 143 144 static inline u8 iwl_cmd_version(u32 cmdid) 145 { 146 return ((cmdid & 0xFF0000) >> 16); 147 } 148 149 static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) 150 { 151 return opcode + (groupid << 8) + (version << 16); 152 } 153 154 /* make u16 wide id out of u8 group and opcode */ 155 #define WIDE_ID(grp, opcode) ((grp << 8) | opcode) 156 157 /* due to the conversion, this group is special; new groups 158 * should be defined in the appropriate fw-api header files 159 */ 160 #define IWL_ALWAYS_LONG_GROUP 1 161 162 /** 163 * struct iwl_cmd_header 164 * 165 * This header format appears in the beginning of each command sent from the 166 * driver, and each response/notification received from uCode. 167 */ 168 struct iwl_cmd_header { 169 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 170 u8 group_id; 171 /* 172 * The driver sets up the sequence number to values of its choosing. 173 * uCode does not use this value, but passes it back to the driver 174 * when sending the response to each driver-originated command, so 175 * the driver can match the response to the command. Since the values 176 * don't get used by uCode, the driver may set up an arbitrary format. 177 * 178 * There is one exception: uCode sets bit 15 when it originates 179 * the response/notification, i.e. when the response/notification 180 * is not a direct response to a command sent by the driver. For 181 * example, uCode issues REPLY_RX when it sends a received frame 182 * to the driver; it is not a direct response to any driver command. 183 * 184 * The Linux driver uses the following format: 185 * 186 * 0:7 tfd index - position within TX queue 187 * 8:12 TX queue id 188 * 13:14 reserved 189 * 15 unsolicited RX or uCode-originated notification 190 */ 191 __le16 sequence; 192 } __packed; 193 194 /** 195 * struct iwl_cmd_header_wide 196 * 197 * This header format appears in the beginning of each command sent from the 198 * driver, and each response/notification received from uCode. 199 * this is the wide version that contains more information about the command 200 * like length, version and command type 201 */ 202 struct iwl_cmd_header_wide { 203 u8 cmd; 204 u8 group_id; 205 __le16 sequence; 206 __le16 length; 207 u8 reserved; 208 u8 version; 209 } __packed; 210 211 #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ 212 #define FH_RSCSR_FRAME_INVALID 0x55550000 213 #define FH_RSCSR_FRAME_ALIGN 0x40 214 #define FH_RSCSR_RPA_EN BIT(25) 215 #define FH_RSCSR_RXQ_POS 16 216 #define FH_RSCSR_RXQ_MASK 0x3F0000 217 218 struct iwl_rx_packet { 219 /* 220 * The first 4 bytes of the RX frame header contain both the RX frame 221 * size and some flags. 222 * Bit fields: 223 * 31: flag flush RB request 224 * 30: flag ignore TC (terminal counter) request 225 * 29: flag fast IRQ request 226 * 28-26: Reserved 227 * 25: Offload enabled 228 * 24: RPF enabled 229 * 23: RSS enabled 230 * 22: Checksum enabled 231 * 21-16: RX queue 232 * 15-14: Reserved 233 * 13-00: RX frame size 234 */ 235 __le32 len_n_flags; 236 struct iwl_cmd_header hdr; 237 u8 data[]; 238 } __packed; 239 240 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt) 241 { 242 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 243 } 244 245 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) 246 { 247 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr); 248 } 249 250 /** 251 * enum CMD_MODE - how to send the host commands ? 252 * 253 * @CMD_ASYNC: Return right away and don't wait for the response 254 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of 255 * the response. The caller needs to call iwl_free_resp when done. 256 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the 257 * command queue, but after other high priority commands. Valid only 258 * with CMD_ASYNC. 259 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle. 260 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle. 261 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans 262 * (i.e. mark it as non-idle). 263 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be 264 * called after this command completes. Valid only with CMD_ASYNC. 265 * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to 266 * check that we leave enough room for the TBs bitmap which needs 20 bits. 267 */ 268 enum CMD_MODE { 269 CMD_ASYNC = BIT(0), 270 CMD_WANT_SKB = BIT(1), 271 CMD_SEND_IN_RFKILL = BIT(2), 272 CMD_HIGH_PRIO = BIT(3), 273 CMD_SEND_IN_IDLE = BIT(4), 274 CMD_MAKE_TRANS_IDLE = BIT(5), 275 CMD_WAKE_UP_TRANS = BIT(6), 276 CMD_WANT_ASYNC_CALLBACK = BIT(7), 277 278 CMD_TB_BITMAP_POS = 11, 279 }; 280 281 #define DEF_CMD_PAYLOAD_SIZE 320 282 283 /** 284 * struct iwl_device_cmd 285 * 286 * For allocation of the command and tx queues, this establishes the overall 287 * size of the largest command we send to uCode, except for commands that 288 * aren't fully copied and use other TFD space. 289 */ 290 struct iwl_device_cmd { 291 union { 292 struct { 293 struct iwl_cmd_header hdr; /* uCode API */ 294 u8 payload[DEF_CMD_PAYLOAD_SIZE]; 295 }; 296 struct { 297 struct iwl_cmd_header_wide hdr_wide; 298 u8 payload_wide[DEF_CMD_PAYLOAD_SIZE - 299 sizeof(struct iwl_cmd_header_wide) + 300 sizeof(struct iwl_cmd_header)]; 301 }; 302 }; 303 } __packed; 304 305 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 306 307 /* 308 * number of transfer buffers (fragments) per transmit frame descriptor; 309 * this is just the driver's idea, the hardware supports 20 310 */ 311 #define IWL_MAX_CMD_TBS_PER_TFD 2 312 313 /** 314 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command 315 * 316 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's 317 * ring. The transport layer doesn't map the command's buffer to DMA, but 318 * rather copies it to a previously allocated DMA buffer. This flag tells 319 * the transport layer not to copy the command, but to map the existing 320 * buffer (that is passed in) instead. This saves the memcpy and allows 321 * commands that are bigger than the fixed buffer to be submitted. 322 * Note that a TFD entry after a NOCOPY one cannot be a normal copied one. 323 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this 324 * chunk internally and free it again after the command completes. This 325 * can (currently) be used only once per command. 326 * Note that a TFD entry after a DUP one cannot be a normal copied one. 327 */ 328 enum iwl_hcmd_dataflag { 329 IWL_HCMD_DFL_NOCOPY = BIT(0), 330 IWL_HCMD_DFL_DUP = BIT(1), 331 }; 332 333 /** 334 * struct iwl_host_cmd - Host command to the uCode 335 * 336 * @data: array of chunks that composes the data of the host command 337 * @resp_pkt: response packet, if %CMD_WANT_SKB was set 338 * @_rx_page_order: (internally used to free response packet) 339 * @_rx_page_addr: (internally used to free response packet) 340 * @flags: can be CMD_* 341 * @len: array of the lengths of the chunks in data 342 * @dataflags: IWL_HCMD_DFL_* 343 * @id: command id of the host command, for wide commands encoding the 344 * version and group as well 345 */ 346 struct iwl_host_cmd { 347 const void *data[IWL_MAX_CMD_TBS_PER_TFD]; 348 struct iwl_rx_packet *resp_pkt; 349 unsigned long _rx_page_addr; 350 u32 _rx_page_order; 351 352 u32 flags; 353 u32 id; 354 u16 len[IWL_MAX_CMD_TBS_PER_TFD]; 355 u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; 356 }; 357 358 static inline void iwl_free_resp(struct iwl_host_cmd *cmd) 359 { 360 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order); 361 } 362 363 struct iwl_rx_cmd_buffer { 364 struct page *_page; 365 int _offset; 366 bool _page_stolen; 367 u32 _rx_page_order; 368 unsigned int truesize; 369 }; 370 371 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) 372 { 373 return (void *)((unsigned long)page_address(r->_page) + r->_offset); 374 } 375 376 static inline int rxb_offset(struct iwl_rx_cmd_buffer *r) 377 { 378 return r->_offset; 379 } 380 381 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) 382 { 383 r->_page_stolen = true; 384 get_page(r->_page); 385 return r->_page; 386 } 387 388 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) 389 { 390 __free_pages(r->_page, r->_rx_page_order); 391 } 392 393 #define MAX_NO_RECLAIM_CMDS 6 394 395 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) 396 397 /* 398 * Maximum number of HW queues the transport layer 399 * currently supports 400 */ 401 #define IWL_MAX_HW_QUEUES 32 402 #define IWL_MAX_TID_COUNT 8 403 #define IWL_FRAME_LIMIT 64 404 #define IWL_MAX_RX_HW_QUEUES 16 405 406 /** 407 * enum iwl_wowlan_status - WoWLAN image/device status 408 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume 409 * @IWL_D3_STATUS_RESET: device was reset while suspended 410 */ 411 enum iwl_d3_status { 412 IWL_D3_STATUS_ALIVE, 413 IWL_D3_STATUS_RESET, 414 }; 415 416 /** 417 * enum iwl_trans_status: transport status flags 418 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed 419 * @STATUS_DEVICE_ENABLED: APM is enabled 420 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up) 421 * @STATUS_INT_ENABLED: interrupts are enabled 422 * @STATUS_RFKILL: the HW RFkill switch is in KILL position 423 * @STATUS_FW_ERROR: the fw is in error state 424 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands 425 * are sent 426 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent 427 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation 428 */ 429 enum iwl_trans_status { 430 STATUS_SYNC_HCMD_ACTIVE, 431 STATUS_DEVICE_ENABLED, 432 STATUS_TPOWER_PMI, 433 STATUS_INT_ENABLED, 434 STATUS_RFKILL, 435 STATUS_FW_ERROR, 436 STATUS_TRANS_GOING_IDLE, 437 STATUS_TRANS_IDLE, 438 STATUS_TRANS_DEAD, 439 }; 440 441 static inline int 442 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) 443 { 444 switch (rb_size) { 445 case IWL_AMSDU_4K: 446 return get_order(4 * 1024); 447 case IWL_AMSDU_8K: 448 return get_order(8 * 1024); 449 case IWL_AMSDU_12K: 450 return get_order(12 * 1024); 451 default: 452 WARN_ON(1); 453 return -1; 454 } 455 } 456 457 struct iwl_hcmd_names { 458 u8 cmd_id; 459 const char *const cmd_name; 460 }; 461 462 #define HCMD_NAME(x) \ 463 { .cmd_id = x, .cmd_name = #x } 464 465 struct iwl_hcmd_arr { 466 const struct iwl_hcmd_names *arr; 467 int size; 468 }; 469 470 #define HCMD_ARR(x) \ 471 { .arr = x, .size = ARRAY_SIZE(x) } 472 473 /** 474 * struct iwl_trans_config - transport configuration 475 * 476 * @op_mode: pointer to the upper layer. 477 * @cmd_queue: the index of the command queue. 478 * Must be set before start_fw. 479 * @cmd_fifo: the fifo for host commands 480 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue. 481 * @no_reclaim_cmds: Some devices erroneously don't set the 482 * SEQ_RX_FRAME bit on some notifications, this is the 483 * list of such notifications to filter. Max length is 484 * %MAX_NO_RECLAIM_CMDS. 485 * @n_no_reclaim_cmds: # of commands in list 486 * @rx_buf_size: RX buffer size needed for A-MSDUs 487 * if unset 4k will be the RX buffer size 488 * @bc_table_dword: set to true if the BC table expects the byte count to be 489 * in DWORD (as opposed to bytes) 490 * @scd_set_active: should the transport configure the SCD for HCMD queue 491 * @wide_cmd_header: firmware supports wide host command header 492 * @sw_csum_tx: transport should compute the TCP checksum 493 * @command_groups: array of command groups, each member is an array of the 494 * commands in the group; for debugging only 495 * @command_groups_size: number of command groups, to avoid illegal access 496 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until 497 * we get the ALIVE from the uCode 498 * @cb_data_offs: offset inside skb->cb to store transport data at, must have 499 * space for at least two pointers 500 */ 501 struct iwl_trans_config { 502 struct iwl_op_mode *op_mode; 503 504 u8 cmd_queue; 505 u8 cmd_fifo; 506 unsigned int cmd_q_wdg_timeout; 507 const u8 *no_reclaim_cmds; 508 unsigned int n_no_reclaim_cmds; 509 510 enum iwl_amsdu_size rx_buf_size; 511 bool bc_table_dword; 512 bool scd_set_active; 513 bool wide_cmd_header; 514 bool sw_csum_tx; 515 const struct iwl_hcmd_arr *command_groups; 516 int command_groups_size; 517 518 u32 sdio_adma_addr; 519 520 u8 cb_data_offs; 521 }; 522 523 struct iwl_trans_dump_data { 524 u32 len; 525 u8 data[]; 526 }; 527 528 struct iwl_trans; 529 530 struct iwl_trans_txq_scd_cfg { 531 u8 fifo; 532 u8 sta_id; 533 u8 tid; 534 bool aggregate; 535 int frame_limit; 536 }; 537 538 /** 539 * struct iwl_trans_ops - transport specific operations 540 * 541 * All the handlers MUST be implemented 542 * 543 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken 544 * out of a low power state. From that point on, the HW can send 545 * interrupts. May sleep. 546 * @op_mode_leave: Turn off the HW RF kill indication if on 547 * May sleep 548 * @start_fw: allocates and inits all the resources for the transport 549 * layer. Also kick a fw image. 550 * May sleep 551 * @fw_alive: called when the fw sends alive notification. If the fw provides 552 * the SCD base address in SRAM, then provide it here, or 0 otherwise. 553 * May sleep 554 * @stop_device: stops the whole device (embedded CPU put to reset) and stops 555 * the HW. If low_power is true, the NIC will be put in low power state. 556 * From that point on, the HW will be stopped but will still issue an 557 * interrupt if the HW RF kill switch is triggered. 558 * This callback must do the right thing and not crash even if %start_hw() 559 * was called but not &start_fw(). May sleep. 560 * @d3_suspend: put the device into the correct mode for WoWLAN during 561 * suspend. This is optional, if not implemented WoWLAN will not be 562 * supported. This callback may sleep. 563 * @d3_resume: resume the device after WoWLAN, enabling the opmode to 564 * talk to the WoWLAN image to get its status. This is optional, if not 565 * implemented WoWLAN will not be supported. This callback may sleep. 566 * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. 567 * If RFkill is asserted in the middle of a SYNC host command, it must 568 * return -ERFKILL straight away. 569 * May sleep only if CMD_ASYNC is not set 570 * @tx: send an skb. The transport relies on the op_mode to zero the 571 * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all 572 * the CSUM will be taken care of (TCP CSUM and IP header in case of 573 * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP 574 * header if it is IPv4. 575 * Must be atomic 576 * @reclaim: free packet until ssn. Returns a list of freed packets. 577 * Must be atomic 578 * @txq_enable: setup a queue. To setup an AC queue, use the 579 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before 580 * this one. The op_mode must not configure the HCMD queue. The scheduler 581 * configuration may be %NULL, in which case the hardware will not be 582 * configured. May sleep. 583 * @txq_disable: de-configure a Tx queue to send AMPDUs 584 * Must be atomic 585 * @txq_set_shared_mode: change Tx queue shared/unshared marking 586 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. 587 * @freeze_txq_timer: prevents the timer of the queue from firing until the 588 * queue is set to awake. Must be atomic. 589 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note 590 * that the transport needs to refcount the calls since this function 591 * will be called several times with block = true, and then the queues 592 * need to be unblocked only after the same number of calls with 593 * block = false. 594 * @write8: write a u8 to a register at offset ofs from the BAR 595 * @write32: write a u32 to a register at offset ofs from the BAR 596 * @read32: read a u32 register at offset ofs from the BAR 597 * @read_prph: read a DWORD from a periphery register 598 * @write_prph: write a DWORD to a periphery register 599 * @read_mem: read device's SRAM in DWORD 600 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory 601 * will be zeroed. 602 * @configure: configure parameters required by the transport layer from 603 * the op_mode. May be called several times before start_fw, can't be 604 * called after that. 605 * @set_pmi: set the power pmi state 606 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. 607 * Sleeping is not allowed between grab_nic_access and 608 * release_nic_access. 609 * @release_nic_access: let the NIC go to sleep. The "flags" parameter 610 * must be the same one that was sent before to the grab_nic_access. 611 * @set_bits_mask - set SRAM register according to value and mask. 612 * @ref: grab a reference to the transport/FW layers, disallowing 613 * certain low power states 614 * @unref: release a reference previously taken with @ref. Note that 615 * initially the reference count is 1, making an initial @unref 616 * necessary to allow low power states. 617 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last 618 * TX'ed commands and similar. The buffer will be vfree'd by the caller. 619 * Note that the transport must fill in the proper file headers. 620 */ 621 struct iwl_trans_ops { 622 623 int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power); 624 void (*op_mode_leave)(struct iwl_trans *iwl_trans); 625 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, 626 bool run_in_rfkill); 627 int (*update_sf)(struct iwl_trans *trans, 628 struct iwl_sf_region *st_fwrd_space); 629 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); 630 void (*stop_device)(struct iwl_trans *trans, bool low_power); 631 632 void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); 633 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, 634 bool test, bool reset); 635 636 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 637 638 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, 639 struct iwl_device_cmd *dev_cmd, int queue); 640 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, 641 struct sk_buff_head *skbs); 642 643 void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, 644 const struct iwl_trans_txq_scd_cfg *cfg, 645 unsigned int queue_wdg_timeout); 646 void (*txq_disable)(struct iwl_trans *trans, int queue, 647 bool configure_scd); 648 649 void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, 650 bool shared); 651 652 int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); 653 void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, 654 bool freeze); 655 void (*block_txq_ptrs)(struct iwl_trans *trans, bool block); 656 657 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); 658 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); 659 u32 (*read32)(struct iwl_trans *trans, u32 ofs); 660 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); 661 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); 662 int (*read_mem)(struct iwl_trans *trans, u32 addr, 663 void *buf, int dwords); 664 int (*write_mem)(struct iwl_trans *trans, u32 addr, 665 const void *buf, int dwords); 666 void (*configure)(struct iwl_trans *trans, 667 const struct iwl_trans_config *trans_cfg); 668 void (*set_pmi)(struct iwl_trans *trans, bool state); 669 bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags); 670 void (*release_nic_access)(struct iwl_trans *trans, 671 unsigned long *flags); 672 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, 673 u32 value); 674 void (*ref)(struct iwl_trans *trans); 675 void (*unref)(struct iwl_trans *trans); 676 int (*suspend)(struct iwl_trans *trans); 677 void (*resume)(struct iwl_trans *trans); 678 679 struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, 680 const struct iwl_fw_dbg_trigger_tlv 681 *trigger); 682 }; 683 684 /** 685 * enum iwl_trans_state - state of the transport layer 686 * 687 * @IWL_TRANS_NO_FW: no fw has sent an alive response 688 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response 689 */ 690 enum iwl_trans_state { 691 IWL_TRANS_NO_FW = 0, 692 IWL_TRANS_FW_ALIVE = 1, 693 }; 694 695 /** 696 * DOC: Platform power management 697 * 698 * There are two types of platform power management: system-wide 699 * (WoWLAN) and runtime. 700 * 701 * In system-wide power management the entire platform goes into a low 702 * power state (e.g. idle or suspend to RAM) at the same time and the 703 * device is configured as a wakeup source for the entire platform. 704 * This is usually triggered by userspace activity (e.g. the user 705 * presses the suspend button or a power management daemon decides to 706 * put the platform in low power mode). The device's behavior in this 707 * mode is dictated by the wake-on-WLAN configuration. 708 * 709 * In runtime power management, only the devices which are themselves 710 * idle enter a low power state. This is done at runtime, which means 711 * that the entire system is still running normally. This mode is 712 * usually triggered automatically by the device driver and requires 713 * the ability to enter and exit the low power modes in a very short 714 * time, so there is not much impact in usability. 715 * 716 * The terms used for the device's behavior are as follows: 717 * 718 * - D0: the device is fully powered and the host is awake; 719 * - D3: the device is in low power mode and only reacts to 720 * specific events (e.g. magic-packet received or scan 721 * results found); 722 * - D0I3: the device is in low power mode and reacts to any 723 * activity (e.g. RX); 724 * 725 * These terms reflect the power modes in the firmware and are not to 726 * be confused with the physical device power state. The NIC can be 727 * in D0I3 mode even if, for instance, the PCI device is in D3 state. 728 */ 729 730 /** 731 * enum iwl_plat_pm_mode - platform power management mode 732 * 733 * This enumeration describes the device's platform power management 734 * behavior when in idle mode (i.e. runtime power management) or when 735 * in system-wide suspend (i.e WoWLAN). 736 * 737 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this 738 * device. At runtime, this means that nothing happens and the 739 * device always remains in active. In system-wide suspend mode, 740 * it means that the all connections will be closed automatically 741 * by mac80211 before the platform is suspended. 742 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN). 743 * For runtime power management, this mode is not officially 744 * supported. 745 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode. 746 */ 747 enum iwl_plat_pm_mode { 748 IWL_PLAT_PM_MODE_DISABLED, 749 IWL_PLAT_PM_MODE_D3, 750 IWL_PLAT_PM_MODE_D0I3, 751 }; 752 753 /* Max time to wait for trans to become idle/non-idle on d0i3 754 * enter/exit (in msecs). 755 */ 756 #define IWL_TRANS_IDLE_TIMEOUT 2000 757 758 /** 759 * struct iwl_trans - transport common data 760 * 761 * @ops - pointer to iwl_trans_ops 762 * @op_mode - pointer to the op_mode 763 * @cfg - pointer to the configuration 764 * @drv - pointer to iwl_drv 765 * @status: a bit-mask of transport status flags 766 * @dev - pointer to struct device * that represents the device 767 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. 768 * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. 769 * @hw_rf_id a u32 with the device RF ID 770 * @hw_id: a u32 with the ID of the device / sub-device. 771 * Set during transport allocation. 772 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 773 * @pm_support: set to true in start_hw if link pm is supported 774 * @ltr_enabled: set to true if the LTR is enabled 775 * @num_rx_queues: number of RX queues allocated by the transport; 776 * the transport must set this before calling iwl_drv_start() 777 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. 778 * The user should use iwl_trans_{alloc,free}_tx_cmd. 779 * @dev_cmd_headroom: room needed for the transport's private use before the 780 * device_cmd for Tx - for internal use only 781 * The user should use iwl_trans_{alloc,free}_tx_cmd. 782 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before 783 * starting the firmware, used for tracing 784 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the 785 * start of the 802.11 header in the @rx_mpdu_cmd 786 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) 787 * @dbg_dest_tlv: points to the destination TLV for debug 788 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug 789 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug 790 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv 791 * @paging_req_addr: The location were the FW will upload / download the pages 792 * from. The address is set by the opmode 793 * @paging_db: Pointer to the opmode paging data base, the pointer is set by 794 * the opmode. 795 * @paging_download_buf: Buffer used for copying all of the pages before 796 * downloading them to the FW. The buffer is allocated in the opmode 797 * @system_pm_mode: the system-wide power management mode in use. 798 * This mode is set dynamically, depending on the WoWLAN values 799 * configured from the userspace at runtime. 800 * @runtime_pm_mode: the runtime power management mode in use. This 801 * mode is set during the initialization phase and is not 802 * supposed to change during runtime. 803 */ 804 struct iwl_trans { 805 const struct iwl_trans_ops *ops; 806 struct iwl_op_mode *op_mode; 807 const struct iwl_cfg *cfg; 808 struct iwl_drv *drv; 809 enum iwl_trans_state state; 810 unsigned long status; 811 812 struct device *dev; 813 u32 max_skb_frags; 814 u32 hw_rev; 815 u32 hw_rf_id; 816 u32 hw_id; 817 char hw_id_str[52]; 818 819 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; 820 821 bool pm_support; 822 bool ltr_enabled; 823 824 const struct iwl_hcmd_arr *command_groups; 825 int command_groups_size; 826 827 u8 num_rx_queues; 828 829 /* The following fields are internal only */ 830 struct kmem_cache *dev_cmd_pool; 831 size_t dev_cmd_headroom; 832 char dev_cmd_pool_name[50]; 833 834 struct dentry *dbgfs_dir; 835 836 #ifdef CONFIG_LOCKDEP 837 struct lockdep_map sync_cmd_lockdep_map; 838 #endif 839 840 u64 dflt_pwr_limit; 841 842 const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; 843 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; 844 struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; 845 u8 dbg_dest_reg_num; 846 847 /* 848 * Paging parameters - All of the parameters should be set by the 849 * opmode when paging is enabled 850 */ 851 u32 paging_req_addr; 852 struct iwl_fw_paging *paging_db; 853 void *paging_download_buf; 854 855 enum iwl_plat_pm_mode system_pm_mode; 856 enum iwl_plat_pm_mode runtime_pm_mode; 857 bool suspending; 858 859 /* pointer to trans specific struct */ 860 /*Ensure that this pointer will always be aligned to sizeof pointer */ 861 char trans_specific[0] __aligned(sizeof(void *)); 862 }; 863 864 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); 865 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); 866 867 static inline void iwl_trans_configure(struct iwl_trans *trans, 868 const struct iwl_trans_config *trans_cfg) 869 { 870 trans->op_mode = trans_cfg->op_mode; 871 872 trans->ops->configure(trans, trans_cfg); 873 WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); 874 } 875 876 static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power) 877 { 878 might_sleep(); 879 880 return trans->ops->start_hw(trans, low_power); 881 } 882 883 static inline int iwl_trans_start_hw(struct iwl_trans *trans) 884 { 885 return trans->ops->start_hw(trans, true); 886 } 887 888 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) 889 { 890 might_sleep(); 891 892 if (trans->ops->op_mode_leave) 893 trans->ops->op_mode_leave(trans); 894 895 trans->op_mode = NULL; 896 897 trans->state = IWL_TRANS_NO_FW; 898 } 899 900 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) 901 { 902 might_sleep(); 903 904 trans->state = IWL_TRANS_FW_ALIVE; 905 906 trans->ops->fw_alive(trans, scd_addr); 907 } 908 909 static inline int iwl_trans_start_fw(struct iwl_trans *trans, 910 const struct fw_img *fw, 911 bool run_in_rfkill) 912 { 913 might_sleep(); 914 915 WARN_ON_ONCE(!trans->rx_mpdu_cmd); 916 917 clear_bit(STATUS_FW_ERROR, &trans->status); 918 return trans->ops->start_fw(trans, fw, run_in_rfkill); 919 } 920 921 static inline int iwl_trans_update_sf(struct iwl_trans *trans, 922 struct iwl_sf_region *st_fwrd_space) 923 { 924 might_sleep(); 925 926 if (trans->ops->update_sf) 927 return trans->ops->update_sf(trans, st_fwrd_space); 928 929 return 0; 930 } 931 932 static inline void _iwl_trans_stop_device(struct iwl_trans *trans, 933 bool low_power) 934 { 935 might_sleep(); 936 937 trans->ops->stop_device(trans, low_power); 938 939 trans->state = IWL_TRANS_NO_FW; 940 } 941 942 static inline void iwl_trans_stop_device(struct iwl_trans *trans) 943 { 944 _iwl_trans_stop_device(trans, true); 945 } 946 947 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, 948 bool reset) 949 { 950 might_sleep(); 951 if (trans->ops->d3_suspend) 952 trans->ops->d3_suspend(trans, test, reset); 953 } 954 955 static inline int iwl_trans_d3_resume(struct iwl_trans *trans, 956 enum iwl_d3_status *status, 957 bool test, bool reset) 958 { 959 might_sleep(); 960 if (!trans->ops->d3_resume) 961 return 0; 962 963 return trans->ops->d3_resume(trans, status, test, reset); 964 } 965 966 static inline void iwl_trans_ref(struct iwl_trans *trans) 967 { 968 if (trans->ops->ref) 969 trans->ops->ref(trans); 970 } 971 972 static inline void iwl_trans_unref(struct iwl_trans *trans) 973 { 974 if (trans->ops->unref) 975 trans->ops->unref(trans); 976 } 977 978 static inline int iwl_trans_suspend(struct iwl_trans *trans) 979 { 980 if (!trans->ops->suspend) 981 return 0; 982 983 return trans->ops->suspend(trans); 984 } 985 986 static inline void iwl_trans_resume(struct iwl_trans *trans) 987 { 988 if (trans->ops->resume) 989 trans->ops->resume(trans); 990 } 991 992 static inline struct iwl_trans_dump_data * 993 iwl_trans_dump_data(struct iwl_trans *trans, 994 const struct iwl_fw_dbg_trigger_tlv *trigger) 995 { 996 if (!trans->ops->dump_data) 997 return NULL; 998 return trans->ops->dump_data(trans, trigger); 999 } 1000 1001 static inline struct iwl_device_cmd * 1002 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) 1003 { 1004 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); 1005 1006 if (unlikely(dev_cmd_ptr == NULL)) 1007 return NULL; 1008 1009 return (struct iwl_device_cmd *) 1010 (dev_cmd_ptr + trans->dev_cmd_headroom); 1011 } 1012 1013 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 1014 1015 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, 1016 struct iwl_device_cmd *dev_cmd) 1017 { 1018 u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom; 1019 1020 kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr); 1021 } 1022 1023 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 1024 struct iwl_device_cmd *dev_cmd, int queue) 1025 { 1026 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) 1027 return -EIO; 1028 1029 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { 1030 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 1031 return -EIO; 1032 } 1033 1034 return trans->ops->tx(trans, skb, dev_cmd, queue); 1035 } 1036 1037 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, 1038 int ssn, struct sk_buff_head *skbs) 1039 { 1040 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { 1041 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 1042 return; 1043 } 1044 1045 trans->ops->reclaim(trans, queue, ssn, skbs); 1046 } 1047 1048 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, 1049 bool configure_scd) 1050 { 1051 trans->ops->txq_disable(trans, queue, configure_scd); 1052 } 1053 1054 static inline void 1055 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, 1056 const struct iwl_trans_txq_scd_cfg *cfg, 1057 unsigned int queue_wdg_timeout) 1058 { 1059 might_sleep(); 1060 1061 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { 1062 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 1063 return; 1064 } 1065 1066 trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); 1067 } 1068 1069 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, 1070 int queue, bool shared_mode) 1071 { 1072 if (trans->ops->txq_set_shared_mode) 1073 trans->ops->txq_set_shared_mode(trans, queue, shared_mode); 1074 } 1075 1076 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, 1077 int fifo, int sta_id, int tid, 1078 int frame_limit, u16 ssn, 1079 unsigned int queue_wdg_timeout) 1080 { 1081 struct iwl_trans_txq_scd_cfg cfg = { 1082 .fifo = fifo, 1083 .sta_id = sta_id, 1084 .tid = tid, 1085 .frame_limit = frame_limit, 1086 .aggregate = sta_id >= 0, 1087 }; 1088 1089 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout); 1090 } 1091 1092 static inline 1093 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, 1094 unsigned int queue_wdg_timeout) 1095 { 1096 struct iwl_trans_txq_scd_cfg cfg = { 1097 .fifo = fifo, 1098 .sta_id = -1, 1099 .tid = IWL_MAX_TID_COUNT, 1100 .frame_limit = IWL_FRAME_LIMIT, 1101 .aggregate = false, 1102 }; 1103 1104 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); 1105 } 1106 1107 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, 1108 unsigned long txqs, 1109 bool freeze) 1110 { 1111 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { 1112 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 1113 return; 1114 } 1115 1116 if (trans->ops->freeze_txq_timer) 1117 trans->ops->freeze_txq_timer(trans, txqs, freeze); 1118 } 1119 1120 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans, 1121 bool block) 1122 { 1123 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { 1124 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 1125 return; 1126 } 1127 1128 if (trans->ops->block_txq_ptrs) 1129 trans->ops->block_txq_ptrs(trans, block); 1130 } 1131 1132 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, 1133 u32 txqs) 1134 { 1135 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { 1136 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 1137 return -EIO; 1138 } 1139 1140 return trans->ops->wait_tx_queue_empty(trans, txqs); 1141 } 1142 1143 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1144 { 1145 trans->ops->write8(trans, ofs, val); 1146 } 1147 1148 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1149 { 1150 trans->ops->write32(trans, ofs, val); 1151 } 1152 1153 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) 1154 { 1155 return trans->ops->read32(trans, ofs); 1156 } 1157 1158 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) 1159 { 1160 return trans->ops->read_prph(trans, ofs); 1161 } 1162 1163 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, 1164 u32 val) 1165 { 1166 return trans->ops->write_prph(trans, ofs, val); 1167 } 1168 1169 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, 1170 void *buf, int dwords) 1171 { 1172 return trans->ops->read_mem(trans, addr, buf, dwords); 1173 } 1174 1175 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ 1176 do { \ 1177 if (__builtin_constant_p(bufsize)) \ 1178 BUILD_BUG_ON((bufsize) % sizeof(u32)); \ 1179 iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ 1180 } while (0) 1181 1182 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) 1183 { 1184 u32 value; 1185 1186 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) 1187 return 0xa5a5a5a5; 1188 1189 return value; 1190 } 1191 1192 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, 1193 const void *buf, int dwords) 1194 { 1195 return trans->ops->write_mem(trans, addr, buf, dwords); 1196 } 1197 1198 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, 1199 u32 val) 1200 { 1201 return iwl_trans_write_mem(trans, addr, &val, 1); 1202 } 1203 1204 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) 1205 { 1206 if (trans->ops->set_pmi) 1207 trans->ops->set_pmi(trans, state); 1208 } 1209 1210 static inline void 1211 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) 1212 { 1213 trans->ops->set_bits_mask(trans, reg, mask, value); 1214 } 1215 1216 #define iwl_trans_grab_nic_access(trans, flags) \ 1217 __cond_lock(nic_access, \ 1218 likely((trans)->ops->grab_nic_access(trans, flags))) 1219 1220 static inline void __releases(nic_access) 1221 iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags) 1222 { 1223 trans->ops->release_nic_access(trans, flags); 1224 __release(nic_access); 1225 } 1226 1227 static inline void iwl_trans_fw_error(struct iwl_trans *trans) 1228 { 1229 if (WARN_ON_ONCE(!trans->op_mode)) 1230 return; 1231 1232 /* prevent double restarts due to the same erroneous FW */ 1233 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) 1234 iwl_op_mode_nic_error(trans->op_mode); 1235 } 1236 1237 /***************************************************** 1238 * transport helper functions 1239 *****************************************************/ 1240 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, 1241 struct device *dev, 1242 const struct iwl_cfg *cfg, 1243 const struct iwl_trans_ops *ops, 1244 size_t dev_cmd_headroom); 1245 void iwl_trans_free(struct iwl_trans *trans); 1246 1247 /***************************************************** 1248 * driver (transport) register/unregister functions 1249 ******************************************************/ 1250 int __must_check iwl_pci_register_driver(void); 1251 void iwl_pci_unregister_driver(void); 1252 1253 #endif /* __iwl_trans_h__ */ 1254