Lines Matching +full:layer +full:buffer +full:offset

1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3 * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
15 #include "iwl-debug.h"
16 #include "iwl-config.h"
18 #include "iwl-op-mode.h"
22 #include "fw/api/dbg-tlv.h"
23 #include "iwl-dbg-tlv.h"
26 * DOC: Transport layer - what is it?
28 * The transport layer is the layer that deals with the HW directly. It provides
29 * an abstraction of the underlying HW to the upper layer. The transport layer
37 * DOC: Life cycle of the transport layer
39 * The transport layer has a very precise life cycle.
43 * 2) The bus's probe calls the transport layer's allocation functions.
45 * 3) These allocation functions will spawn the upper layer which will
65 #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
81 * 28-27: Reserved
87 * 21-16: RX queue
88 * 15-14: Reserved
89 * 13-00: RX frame size
98 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; in iwl_rx_packet_len()
103 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr); in iwl_rx_packet_payload_len()
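The two helpers above decode the length word documented earlier: the frame size sits in bits 13-0 of len_n_flags, and the payload length is that value minus the packet header. A minimal, hedged sketch of how an RX notification handler might use them (the handler name and the runt check are illustrative, not part of this header):

    static void example_rx_notif(struct iwl_rx_packet *pkt)
    {
            u32 pkt_len = iwl_rx_packet_len(pkt);   /* bits 13-0 of len_n_flags */
            u32 data_len;

            /* Reject runt packets before looking at the payload. */
            if (pkt_len < sizeof(pkt->hdr))
                    return;

            data_len = iwl_rx_packet_payload_len(pkt); /* pkt_len - sizeof(pkt->hdr) */
            /* pkt->data is now known to hold data_len bytes. */
    }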
107 * enum CMD_MODE - how to send the host commands?
110 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
116 * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
143 u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
151 * struct iwl_device_tx_cmd - buffer for TX command
174 #define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
177 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
180 * ring. The transport layer doesn't map the command's buffer to DMA, but
181 * rather copies it to a previously allocated DMA buffer. This flag tells
182 * the transport layer not to copy the command, but to map the existing
183 * buffer (that is passed in) instead. This saves the memcpy and allows
184 * commands that are bigger than the fixed buffer to be submitted.
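To make the NOCOPY semantics above concrete, here is a hedged sketch of a host command where the first chunk is copied into the DMA ring as usual while a large second chunk is mapped in place; SOME_CMD_ID, hdr, big_buf and big_len are placeholders, and the mapped buffer must stay alive (and DMA-able) until the command completes:

    struct iwl_host_cmd hcmd = {
            .id = SOME_CMD_ID,                      /* placeholder command id */
            .data = { hdr, big_buf, },              /* hdr is copied, big_buf is mapped */
            .len = { sizeof(*hdr), big_len, },
            .dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
    };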
207 * struct iwl_host_cmd - Host command to the uCode
233 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order); in iwl_free_resp()
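iwl_free_resp() pairs with CMD_WANT_SKB: a caller that asked to keep the response buffer owns it until this call. A hedged usage sketch (SOME_CMD_ID is a placeholder and error handling is trimmed; iwl_trans_send_cmd() is the wrapper around the @send_cmd op):

    struct iwl_host_cmd hcmd = {
            .id = SOME_CMD_ID,              /* placeholder */
            .flags = CMD_WANT_SKB,          /* keep the response buffer for us */
    };
    int ret = iwl_trans_send_cmd(trans, &hcmd);

    if (!ret) {
            struct iwl_rx_packet *pkt = hcmd.resp_pkt;

            /* ... parse iwl_rx_packet_payload_len(pkt) bytes of pkt->data ... */
            iwl_free_resp(&hcmd);           /* release the response pages */
    }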
246 return (void *)((unsigned long)page_address(r->_page) + r->_offset); in rxb_addr()
251 return r->_offset; in rxb_offset()
256 r->_page_stolen = true; in rxb_steal_page()
257 get_page(r->_page); in rxb_steal_page()
258 return r->_page; in rxb_steal_page()
263 __free_pages(r->_page, r->_rx_page_order); in iwl_free_rxb()
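The rxb helpers above encode a simple ownership rule: memory returned by rxb_addr() is only valid for the duration of the RX handler, unless the handler steals the page. A hedged sketch of keeping the payload past return (the want_to_keep() predicate and saved_* variables are illustrative):

    struct iwl_rx_packet *pkt = rxb_addr(rxb);

    if (want_to_keep(pkt)) {                /* placeholder predicate */
            /* Take a reference and mark the page stolen so the
             * transport won't recycle it into the RX ring. */
            saved_page = rxb_steal_page(rxb);
            saved_offset = rxb_offset(rxb);
    }
    /* Without stealing, pkt is invalid once the handler returns. */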
268 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
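IWL_MASK(lo, hi) builds a mask with bits lo through hi set: (1 << hi) contributes the top bit, and (1 << hi) - (1 << lo) contributes bits lo..hi-1. A quick check against the frame-size mask defined earlier in this file:

    /* IWL_MASK(0, 13) = (1 << 13) | ((1 << 13) - (1 << 0))
     *                 = 0x2000 | 0x1FFF
     *                 = 0x3FFF  == FH_RSCSR_FRAME_SIZE_MSK (bits 0-13) */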
271 * Maximum number of HW queues the transport layer
284 * enum iwl_wowlan_status - WoWLAN image/device status
299 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
300 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
304 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
305 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
337 return -1; in iwl_trans_get_rb_size_order()
376 * struct iwl_dump_sanitize_ops - dump sanitization operations
390 * struct iwl_trans_config - transport configuration
392 * @op_mode: pointer to the upper layer.
402 * @rx_buf_size: RX buffer size needed for A-MSDUs
403 * if unset, 4k will be the RX buffer size
410 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
453 * struct iwl_trans_rxq_dma_data - RX queue DMA data
454 * @fr_bd_cb: DMA address of free BD cyclic buffer
455 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
457 * @ur_bd_cb: DMA address of used BD cyclic buffer
470 * struct iwl_pnvm_image - contains info about the parsed pnvm image
485 * struct iwl_trans_ops - transport specific operations
494 * layer. Also kicks a fw image.
510 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
512 * return -ERFKILL straight away.
515 * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
529 * @txq_disable: de-configure a Tx queue to send AMPDUs
541 * @write8: write a u8 to a register at offset ofs from the BAR
542 * @write32: write a u32 to a register at offset ofs from the BAR
543 * @read32: read a u32 register at offset ofs from the BAR
550 * the given offset.
551 * @configure: configure parameters required by the transport layer from
555 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
560 * @set_bits_mask: set SRAM register according to value and mask.
561 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
562 * TX'ed commands and similar. The buffer will be vfree'd by the caller.
567 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
570 * @set_reduce_power: set reduce power table addresses in the scratch buffer
661 * enum iwl_trans_state - state of the transport layer
676 * In system-wide power management the entire platform goes into a low
682 * mode is dictated by the wake-on-WLAN configuration.
686 * - D0: the device is fully powered and the host is awake;
687 * - D3: the device is in low power mode and only reacts to
688 * specific events (e.g. magic-packet received or scan
696 * enum iwl_plat_pm_mode - platform power management mode
699 * behavior when in system-wide suspend (i.e. WoWLAN).
702 * device. In system-wide suspend mode, it means that all
754 * struct iwl_fw_mon - fw monitor per allocation id
756 * @frags: an array of DRAM buffer fragments
764 * struct iwl_self_init_dram - dram data used by self init process
778 * struct iwl_imr_data - imr dram data used during debug process
800 * struct iwl_pc_data - program counter details
810 * struct iwl_trans_debug - transport debug related data
825 * @fw_mon_cfg: debug buffer allocation configuration
826 * @fw_mon_ini: DRAM buffer fragments per allocation id
827 * @fw_mon: DRAM buffer for firmware monitor
899 * into the buffer regardless of whether it should be mapped or not.
900 * This indicates how big the first TB must be to include the scratch buffer
902 * Since the PN is located at offset 12 and is 8 bytes long, it's 20 now.
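The arithmetic behind "it's 20 now", assuming the layout described in the comment:

    /* PN offset within the scratch area: 12 bytes
     * PN length:                          8 bytes
     * => the first TB must cover at least 12 + 8 = 20 bytes */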
912 /* buffer to free after command completes */
922 * struct iwl_txq - Tx Queue for DMA
926 * the writeback -- this is DMA memory and an array holding one buffer
935 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
939 * @write_ptr: first empty entry (index); the host write pointer (host_w)
947 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
993 * struct iwl_trans_txqs - transport tx queues data
996 * @page_offs: offset from skb->cb to mac header page pointer
997 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
998 * @queue_used: bit mask of used queues
999 * @queue_stopped: bit mask of stopped queues
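Both fields are plain bitmaps indexed by queue id, so the usual test_bit()/set_bit() helpers apply. A hedged sketch (locking and the surrounding queue-wake logic are omitted):

    /* Is queue i allocated and currently flowing? */
    if (test_bit(i, trans->txqs.queue_used) &&
        !test_bit(i, trans->txqs.queue_stopped)) {
            /* safe to hand more frames to queue i */
    }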
1032 * struct iwl_trans - transport common data
1034 * @csme_own: true if we couldn't get ownership of the device
1035 * @ops: pointer to iwl_trans_ops
1036 * @op_mode: pointer to the op_mode
1037 * @trans_cfg: the trans-specific configuration part
1038 * @cfg: pointer to the configuration
1039 * @drv: pointer to iwl_drv
1040 * @status: a bit-mask of transport status flags
1041 * @dev: pointer to struct device * that represents the device
1047 * @hw_id: a u32 with the ID of the device / sub-device.
1061 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
1068 * @system_pm_mode: the system-wide power management mode in use.
1076 * @invalid_tx_cmd: invalid TX command buffer
1154 trans->op_mode = trans_cfg->op_mode; in iwl_trans_configure()
1156 trans->ops->configure(trans, trans_cfg); in iwl_trans_configure()
1164 return trans->ops->start_hw(trans); in iwl_trans_start_hw()
1171 if (trans->ops->op_mode_leave) in iwl_trans_op_mode_leave()
1172 trans->ops->op_mode_leave(trans); in iwl_trans_op_mode_leave()
1174 trans->op_mode = NULL; in iwl_trans_op_mode_leave()
1176 trans->state = IWL_TRANS_NO_FW; in iwl_trans_op_mode_leave()
1183 trans->state = IWL_TRANS_FW_ALIVE; in iwl_trans_fw_alive()
1185 trans->ops->fw_alive(trans, scd_addr); in iwl_trans_fw_alive()
1196 WARN_ON_ONCE(!trans->rx_mpdu_cmd); in iwl_trans_start_fw()
1198 clear_bit(STATUS_FW_ERROR, &trans->status); in iwl_trans_start_fw()
1199 ret = trans->ops->start_fw(trans, fw, run_in_rfkill); in iwl_trans_start_fw()
1201 trans->state = IWL_TRANS_FW_STARTED; in iwl_trans_start_fw()
1210 trans->ops->stop_device(trans); in iwl_trans_stop_device()
1212 trans->state = IWL_TRANS_NO_FW; in iwl_trans_stop_device()
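Taken together, the wrappers above fix the op-mode's bring-up order: configure, start the HW, start the firmware, then declare it alive once the ALIVE notification arrives. A condensed, hedged sketch of that flow (error paths and the real notification wait are elided; scd_base_addr comes from the ALIVE notification):

    iwl_trans_configure(trans, &trans_cfg);

    ret = iwl_trans_start_hw(trans);
    if (!ret)
            ret = iwl_trans_start_fw(trans, fw, false);
    if (!ret) {
            /* ... wait for the ALIVE notification ... */
            iwl_trans_fw_alive(trans, scd_base_addr);
    }
    if (ret)
            iwl_trans_stop_device(trans);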
1219 if (!trans->ops->d3_suspend) in iwl_trans_d3_suspend()
1220 return -EOPNOTSUPP; in iwl_trans_d3_suspend()
1222 return trans->ops->d3_suspend(trans, test, reset); in iwl_trans_d3_suspend()
1230 if (!trans->ops->d3_resume) in iwl_trans_d3_resume()
1231 return -EOPNOTSUPP; in iwl_trans_d3_resume()
1233 return trans->ops->d3_resume(trans, status, test, reset); in iwl_trans_d3_resume()
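The two D3 wrappers pair across a platform suspend cycle, and both return -EOPNOTSUPP when the transport lacks the op, so D3 support can be probed at runtime. A hedged sketch (the test/reset flag values are illustrative):

    enum iwl_d3_status d3_status;
    int ret;

    ret = iwl_trans_d3_suspend(trans, false /* test */, true /* reset */);
    if (ret)        /* includes -EOPNOTSUPP: no D3 support */
            return ret;

    /* ... platform sleeps; device waits for a wakeup trigger ... */

    ret = iwl_trans_d3_resume(trans, &d3_status, false, true);
    if (!ret && d3_status != IWL_D3_STATUS_ALIVE) {
            /* firmware lost its state while we slept: full restart needed */
    }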
1241 if (!trans->ops->dump_data) in iwl_trans_dump_data()
1243 return trans->ops->dump_data(trans, dump_mask, in iwl_trans_dump_data()
1250 return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); in iwl_trans_alloc_tx_cmd()
1258 kmem_cache_free(trans->dev_cmd_pool, dev_cmd); in iwl_trans_free_tx_cmd()
1264 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) in iwl_trans_tx()
1265 return -EIO; in iwl_trans_tx()
1267 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_tx()
1268 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_tx()
1269 return -EIO; in iwl_trans_tx()
1272 return trans->ops->tx(trans, skb, dev_cmd, queue); in iwl_trans_tx()
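On the TX path, iwl_trans_alloc_tx_cmd() pairs with iwl_trans_tx(): on success the transport owns the command and returns it via the reclaim flow, on failure the caller must put it back in the pool. A hedged sketch:

    struct iwl_device_tx_cmd *dev_cmd;
    int ret;

    dev_cmd = iwl_trans_alloc_tx_cmd(trans);
    if (!dev_cmd)
            return -ENOMEM;

    /* ... build the TX command for this skb ... */

    ret = iwl_trans_tx(trans, skb, dev_cmd, queue);
    if (ret)        /* -EIO on FW error or bad transport state */
            iwl_trans_free_tx_cmd(trans, dev_cmd);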
1279 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_reclaim()
1280 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_reclaim()
1284 trans->ops->reclaim(trans, queue, ssn, skbs, is_flush); in iwl_trans_reclaim()
1290 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_set_q_ptrs()
1291 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_set_q_ptrs()
1295 trans->ops->set_q_ptrs(trans, queue, ptr); in iwl_trans_set_q_ptrs()
1301 trans->ops->txq_disable(trans, queue, configure_scd); in iwl_trans_txq_disable()
1311 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_txq_enable_cfg()
1312 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_txq_enable_cfg()
1316 return trans->ops->txq_enable(trans, queue, ssn, in iwl_trans_txq_enable_cfg()
1324 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) in iwl_trans_get_rxq_dma_data()
1325 return -EOPNOTSUPP; in iwl_trans_get_rxq_dma_data()
1327 return trans->ops->rxq_dma_data(trans, queue, data); in iwl_trans_get_rxq_dma_data()
1333 if (WARN_ON_ONCE(!trans->ops->txq_free)) in iwl_trans_txq_free()
1336 trans->ops->txq_free(trans, queue); in iwl_trans_txq_free()
1346 if (WARN_ON_ONCE(!trans->ops->txq_alloc)) in iwl_trans_txq_alloc()
1347 return -EOPNOTSUPP; in iwl_trans_txq_alloc()
1349 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_txq_alloc()
1350 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_txq_alloc()
1351 return -EIO; in iwl_trans_txq_alloc()
1354 return trans->ops->txq_alloc(trans, flags, sta_mask, tid, in iwl_trans_txq_alloc()
1361 if (trans->ops->txq_set_shared_mode) in iwl_trans_txq_set_shared_mode()
1362 trans->ops->txq_set_shared_mode(trans, queue, shared_mode); in iwl_trans_txq_set_shared_mode()
1387 .sta_id = -1, in iwl_trans_ac_txq_enable()
1400 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_freeze_txq_timer()
1401 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_freeze_txq_timer()
1405 if (trans->ops->freeze_txq_timer) in iwl_trans_freeze_txq_timer()
1406 trans->ops->freeze_txq_timer(trans, txqs, freeze); in iwl_trans_freeze_txq_timer()
1412 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_block_txq_ptrs()
1413 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_block_txq_ptrs()
1417 if (trans->ops->block_txq_ptrs) in iwl_trans_block_txq_ptrs()
1418 trans->ops->block_txq_ptrs(trans, block); in iwl_trans_block_txq_ptrs()
1424 if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) in iwl_trans_wait_tx_queues_empty()
1425 return -EOPNOTSUPP; in iwl_trans_wait_tx_queues_empty()
1428 if (trans->state != IWL_TRANS_FW_ALIVE) { in iwl_trans_wait_tx_queues_empty()
1429 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_wait_tx_queues_empty()
1430 return -EIO; in iwl_trans_wait_tx_queues_empty()
1433 return trans->ops->wait_tx_queues_empty(trans, txqs); in iwl_trans_wait_tx_queues_empty()
1438 if (WARN_ON_ONCE(!trans->ops->wait_txq_empty)) in iwl_trans_wait_txq_empty()
1439 return -EOPNOTSUPP; in iwl_trans_wait_txq_empty()
1441 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_wait_txq_empty()
1442 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_wait_txq_empty()
1443 return -EIO; in iwl_trans_wait_txq_empty()
1446 return trans->ops->wait_txq_empty(trans, queue); in iwl_trans_wait_txq_empty()
1451 trans->ops->write8(trans, ofs, val); in iwl_trans_write8()
1456 trans->ops->write32(trans, ofs, val); in iwl_trans_write32()
1461 return trans->ops->read32(trans, ofs); in iwl_trans_read32()
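write8/write32/read32 are the raw BAR accessors; everything fancier is built on them. A hedged read-modify-write sketch (CSR_EXAMPLE_REG and EXAMPLE_BIT are placeholders, not registers defined by this driver):

    u32 val = iwl_trans_read32(trans, CSR_EXAMPLE_REG);

    val |= EXAMPLE_BIT;
    iwl_trans_write32(trans, CSR_EXAMPLE_REG, val);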
1466 return trans->ops->read_prph(trans, ofs); in iwl_trans_read_prph()
1472 return trans->ops->write_prph(trans, ofs, val); in iwl_trans_write_prph()
1478 return trans->ops->read_mem(trans, addr, buf, dwords); in iwl_trans_read_mem()
1492 if (trans->ops->imr_dma_data) in iwl_trans_write_imr_mem()
1493 return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt); in iwl_trans_write_imr_mem()
1510 return trans->ops->write_mem(trans, addr, buf, dwords); in iwl_trans_write_mem()
1521 if (trans->ops->set_pmi) in iwl_trans_set_pmi()
1522 trans->ops->set_pmi(trans, state); in iwl_trans_set_pmi()
1528 if (trans->ops->sw_reset) in iwl_trans_sw_reset()
1529 return trans->ops->sw_reset(trans, retake_ownership); in iwl_trans_sw_reset()
1536 trans->ops->set_bits_mask(trans, reg, mask, value); in iwl_trans_set_bits_mask()
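iwl_trans_set_bits_mask() folds that read-modify-write into one transport call: bits selected by mask are replaced with the corresponding bits of value. With the same placeholder names as above:

    /* Equivalent to the read-modify-write sketch above. */
    iwl_trans_set_bits_mask(trans, CSR_EXAMPLE_REG, EXAMPLE_BIT, EXAMPLE_BIT);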
1541 likely((trans)->ops->grab_nic_access(trans)))
1546 trans->ops->release_nic_access(trans); in __releases()
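grab_nic_access/release_nic_access bracket every access to non-HBUS registers while the NIC may be asleep; the grab macro evaluates to false if the NIC could not be woken. The canonical pattern, as a hedged sketch (SOME_PRPH_REG is a placeholder):

    if (iwl_trans_grab_nic_access(trans)) {
            /* NIC is awake: non-HBUS registers (e.g. prph) are safe now. */
            u32 val = iwl_trans_read_prph(trans, SOME_PRPH_REG);

            /* ... use val ... */
            iwl_trans_release_nic_access(trans);
    }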
1552 if (WARN_ON_ONCE(!trans->op_mode)) in iwl_trans_fw_error()
1556 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { in iwl_trans_fw_error()
1557 trans->state = IWL_TRANS_NO_FW; in iwl_trans_fw_error()
1558 iwl_op_mode_nic_error(trans->op_mode, sync); in iwl_trans_fw_error()
1564 return trans->state == IWL_TRANS_FW_ALIVE; in iwl_trans_fw_running()
1569 if (trans->ops->sync_nmi) in iwl_trans_sync_nmi()
1570 trans->ops->sync_nmi(trans); in iwl_trans_sync_nmi()
1580 return trans->ops->load_pnvm(trans, pnvm_data, capa); in iwl_trans_load_pnvm()
1586 if (trans->ops->set_pnvm) in iwl_trans_set_pnvm()
1587 trans->ops->set_pnvm(trans, capa); in iwl_trans_set_pnvm()
1595 return trans->ops->load_reduce_power(trans, payloads, capa); in iwl_trans_load_reduce_power()
1602 if (trans->ops->set_reduce_power) in iwl_trans_set_reduce_power()
1603 trans->ops->set_reduce_power(trans, capa); in iwl_trans_set_reduce_power()
1608 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED || in iwl_trans_dbg_ini_valid()
1609 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED; in iwl_trans_dbg_ini_valid()
1614 if (trans->ops->interrupts) in iwl_trans_interrupts()
1615 trans->ops->interrupts(trans, enable); in iwl_trans_interrupts()