/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 * 1) A helper function is called during the module initialization and
 *    registers the bus driver's ops with the transport's alloc function.
 * 2) The bus's probe function calls the transport layer's allocation
 *    function. Of course, this function is bus specific.
 * 3) This allocation function will spawn the upper layer, which will
 *    register with mac80211.
 *
 * 4) At some point (i.e. mac80211's start call), the op_mode will call
 *    the following sequence:
 *    start_hw
 *    start_fw
 *
 * 5) Then when finished (or reset):
 *    stop_device
 *
 * 6) Eventually, the free function will be called.
 */

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
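/*
 * Illustrative sketch (not part of the original header): pulling the RX
 * queue number and the payload length out of a received packet using only
 * the masks and helpers defined above. The "pkt" variable is hypothetical.
 *
 *	struct iwl_rx_packet *pkt = ...;
 *	u32 len_n_flags = le32_to_cpu(pkt->len_n_flags);
 *	u32 rxq = (len_n_flags & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS;
 *	u32 payload_len = iwl_rx_packet_payload_len(pkt);
 *
 * iwl_rx_packet_len() returns the frame size including the command header,
 * so iwl_rx_packet_payload_len() subtracts sizeof(pkt->hdr) from it.
 */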
/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

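/*
 * Illustrative sketch (not part of the original header): sending a
 * synchronous host command with a large NOCOPY chunk and reading the
 * response. The command id, buffers and sizes are hypothetical;
 * iwl_trans_send_cmd() is declared further down in this file.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = ...,				// command id (or wide id)
 *		.flags = CMD_WANT_SKB,
 *		.data = { &small_hdr, big_buf },
 *		.len = { sizeof(small_hdr), big_len },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		// resp_pkt is valid because CMD_WANT_SKB was set
 *		struct iwl_rx_packet *pkt = hcmd.resp_pkt;
 *		// ... parse pkt->data, iwl_rx_packet_payload_len(pkt) bytes ...
 *		iwl_free_resp(&hcmd);
 *	}
 */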
/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

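/*
 * Illustrative sketch (not part of the original header): how an op_mode
 * typically describes its command names for debugging and hands them to the
 * transport through struct iwl_trans_config. The command names and the
 * surrounding configuration values are hypothetical.
 *
 *	static const struct iwl_hcmd_names my_legacy_names[] = {
 *		HCMD_NAME(ALIVE),
 *		HCMD_NAME(REPLY_ERROR),
 *	};
 *
 *	static const struct iwl_hcmd_arr my_cmd_groups[] = {
 *		[0x0] = HCMD_ARR(my_legacy_names),
 *	};
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = my_cmd_groups,
 *		.command_groups_size = ARRAY_SIZE(my_cmd_groups),
 *	};
 *	iwl_trans_configure(trans, &trans_cfg);
 *
 * iwl_trans_configure() and iwl_get_cmd_string() (both declared below) use
 * this table to translate command IDs back to names in debug output.
 */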
/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packet until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
 *	context info.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
};

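/*
 * Illustrative sketch (not part of the original header): the shape of an
 * ops table a bus-specific transport (PCIe in practice) registers. The
 * my_bus_* handlers are hypothetical; a real implementation must provide
 * all mandatory handlers, as noted in the kernel-doc above.
 *
 *	static const struct iwl_trans_ops my_bus_trans_ops = {
 *		.start_hw	= my_bus_start_hw,
 *		.start_fw	= my_bus_start_fw,
 *		.fw_alive	= my_bus_fw_alive,
 *		.stop_device	= my_bus_stop_device,
 *		.send_cmd	= my_bus_send_cmd,
 *		.tx		= my_bus_tx,
 *		.reclaim	= my_bus_reclaim,
 *		// ... register access, queue management, etc. ...
 *	};
 *
 * The table is passed to iwl_trans_alloc() (declared at the end of this
 * file), and every iwl_trans_*() inline below simply dispatches through it.
 */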
/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

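/*
 * Illustrative sketch (not part of the original header): filling a
 * struct iwl_dram_data from a coherent DMA allocation, roughly the way the
 * transport allocates firmware monitor / self-init blocks. The size and
 * the error handling are hypothetical.
 *
 *	struct iwl_dram_data frag = { .size = 0x10000 };
 *
 *	frag.block = dma_alloc_coherent(trans->dev, frag.size,
 *					&frag.physical, GFP_KERNEL);
 *	if (!frag.block)
 *		return -ENOMEM;
 *	// ... use the block, then later ...
 *	dma_free_coherent(trans->dev, frag.size, frag.block, frag.physical);
 */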
/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now (12 + 8 = 20).
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct __percpu iwl_tso_hdr_page * tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;
};

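/*
 * Illustrative sketch (not part of the original header): how the queue
 * bookkeeping bitmaps above are meant to be used. Queue numbers index the
 * IWL_MAX_TVQM_QUEUES-bit maps; the real call sites live in the transport
 * code, this only shows the idea.
 *
 *	if (!test_bit(txq_id, trans->txqs.queue_used))
 *		return;		// queue was never enabled, nothing to do
 *
 *	if (test_and_clear_bit(txq_id, trans->txqs.queue_stopped))
 *		// let the op_mode (and mac80211) transmit to it again
 *		iwl_op_mode_queue_not_full(trans->op_mode, txq_id);
 */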
/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @txqs: transport tx queues data.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

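/*
 * Illustrative sketch (not part of the original header): the start/stop
 * sequence an op_mode drives, matching "Life cycle of the transport layer"
 * at the top of this file. The fw image and SCD address variables are
 * hypothetical and error handling is reduced to the bare minimum.
 *
 *	if (iwl_trans_start_hw(trans))
 *		return -EIO;
 *
 *	// trans->rx_mpdu_cmd must already be set at this point
 *	if (iwl_trans_start_fw(trans, fw_img, false)) {
 *		iwl_trans_stop_device(trans);
 *		return -EIO;
 *	}
 *
 *	// ... when the firmware's ALIVE notification arrives ...
 *	iwl_trans_fw_alive(trans, scd_base_addr);
 *
 *	// ... and on shutdown or error recovery ...
 *	iwl_trans_stop_device(trans);
 */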
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

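/*
 * Illustrative sketch (not part of the original header): the skeleton of a
 * TX submission using the helpers above. The queue number and the way the
 * device-specific TX command is filled are hypothetical; the real op_mode
 * derives both from the frame and the station.
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	// ... fill dev_cmd->hdr and the device-specific TX command payload ...
 *	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id)) {
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		return -EIO;
 *	}
 *	// on success dev_cmd is freed later, once the frame has been reclaimed
 */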
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

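/*
 * Illustrative sketch (not part of the original header): enabling a legacy
 * AC queue, letting traffic flow, and draining it before tearing it down.
 * The queue/fifo numbers and the watchdog timeout are hypothetical.
 *
 *	iwl_trans_ac_txq_enable(trans, txq_id, fifo, wdg_timeout);
 *
 *	// ... frames are pushed with iwl_trans_tx(trans, skb, dev_cmd, txq_id) ...
 *
 *	iwl_trans_wait_txq_empty(trans, txq_id);
 *	iwl_trans_txq_disable(trans, txq_id, true);
 *
 * Note that iwl_trans_wait_txq_empty() returns -EIO or -ENOTSUPP instead of
 * waiting when the firmware is not alive or the bus doesn't implement it.
 */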
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
	       trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

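/*
 * Illustrative sketch (not part of the original header): the canonical
 * access pattern for non-HBUS registers. No sleeping is allowed while the
 * NIC is held awake, and the same flags cookie must be passed back on
 * release. The register offset is hypothetical.
 *
 *	unsigned long flags;
 *	u32 val = 0;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		val = iwl_trans_read_prph(trans, some_prph_reg);
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */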
/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */