Lines matching "ipa-clock-enabled-valid"

1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2023 Linaro Ltd.
12 #include <linux/dma-direction.h>
16 #include "ipa.h"
27 #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
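The "Must be non-zero" requirement exists because the batch size is used as a modulus when deciding whether to ring the channel doorbell; a short note based on the replenish code matched further down makes the intent explicit:

/* Only every IPA_REPLENISH_BATCH committed receive buffers ring the channel
 * doorbell, rather than one doorbell write per buffer:
 *     doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
 * A batch size of zero would make this a division by zero, hence the
 * "Must be non-zero" requirement above.
 */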
30 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
32 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
37 /** enum ipa_status_opcode - IPA status opcode field hardware values */
48 /** enum ipa_status_exception - IPA status exception field hardware values */
64 /** enum ipa_status_mask - IPA status mask field bitmask hardware values */
84 /* Special IPA filter/router rule field value indicating "rule miss" */
85 #define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
87 /** The IPA status nat_type field uses enum ipa_nat_type hardware values */
89 /* enum ipa_status_field_id - IPA packet status structure field identifiers */
111 STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
112 STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
121 /* Size in bytes of an IPA packet status structure */
124 /* IPA status structure decoder; looks up field values for a structure */
125 static u32 ipa_status_extract(struct ipa *ipa, const void *data, in ipa_status_extract() argument
128 enum ipa_version version = ipa->version; in ipa_status_extract()
144 /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */ in ipa_status_extract()
145 /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */ in ipa_status_extract()
150 /* Status word 1, bits 29-31 are reserved */ in ipa_status_extract()
163 /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */ in ipa_status_extract()
205 /* Status word 7, bits 16-30 are reserved */ in ipa_status_extract()
206 /* Status word 7, bit 31 is reserved (not IPA v5.0+) */ in ipa_status_extract()
224 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; in ipa_aggr_size_kb()
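The single line of ipa_aggr_size_kb() matched above is the "soft limit" adjustment; a worked example of the kilobyte conversion may help (the buffer size, MTU, and overhead values below are illustrative assumptions, not taken from the driver):

/* Example (illustrative values only):
 *   rx_buffer_size = 32768, IPA_MTU = 1500, IPA_RX_BUFFER_OVERHEAD = 644
 *   Soft limit: (32768 - 1500 - 644) / SZ_1K = 29 KB
 *   Hard limit:  32768 / SZ_1K               = 32 KB
 * A soft limit only closes aggregation after the limit has been crossed,
 * so it must leave room for one more full-MTU packet plus per-buffer
 * overhead; a hard limit is never crossed, so the whole buffer counts.
 */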
231 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
236 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
242 if (!data->toward_ipa) { in ipa_endpoint_data_valid_one()
249 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
252 data->endpoint_id); in ipa_endpoint_data_valid_one()
256 /* Nothing more to check for non-AP RX */ in ipa_endpoint_data_valid_one()
257 if (data->ee_id != GSI_EE_AP) in ipa_endpoint_data_valid_one()
260 rx_config = &data->endpoint.config.rx; in ipa_endpoint_data_valid_one()
263 buffer_size = rx_config->buffer_size; in ipa_endpoint_data_valid_one()
267 data->endpoint_id, buffer_size, limit); in ipa_endpoint_data_valid_one()
271 if (!data->endpoint.config.aggregation) { in ipa_endpoint_data_valid_one()
275 if (rx_config->aggr_time_limit) { in ipa_endpoint_data_valid_one()
278 data->endpoint_id); in ipa_endpoint_data_valid_one()
282 if (rx_config->aggr_hard_limit) { in ipa_endpoint_data_valid_one()
284 data->endpoint_id); in ipa_endpoint_data_valid_one()
288 if (rx_config->aggr_close_eof) { in ipa_endpoint_data_valid_one()
290 data->endpoint_id); in ipa_endpoint_data_valid_one()
303 aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_data_valid_one()
304 rx_config->aggr_hard_limit); in ipa_endpoint_data_valid_one()
305 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_data_valid_one()
310 data->endpoint_id, aggr_size, limit); in ipa_endpoint_data_valid_one()
318 /* Starting with IPA v4.5 sequencer replication is obsolete */ in ipa_endpoint_data_valid_one()
319 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_data_valid_one()
320 if (data->endpoint.config.tx.seq_rep_type) { in ipa_endpoint_data_valid_one()
321  		dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n", in ipa_endpoint_data_valid_one()
322 data->endpoint_id); in ipa_endpoint_data_valid_one()
327 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
328 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
332 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
341 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
346 if (other_data->toward_ipa) { in ipa_endpoint_data_valid_one()
349 data->endpoint_id); in ipa_endpoint_data_valid_one()
354 if (other_data->ee_id == GSI_EE_AP) { in ipa_endpoint_data_valid_one()
355 /* ...make sure it has status enabled. */ in ipa_endpoint_data_valid_one()
356 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
358 "status not enabled for endpoint %u\n", in ipa_endpoint_data_valid_one()
359 other_data->endpoint_id); in ipa_endpoint_data_valid_one()
365 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
366 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
370 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
378 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
387 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count, in ipa_endpoint_max() argument
391 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_max()
411 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_max()
415 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_max()
421 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_max()
423 max = max_t(u32, max, dp->endpoint_id); in ipa_endpoint_max()
429 /* Allocate a transaction to use on a non-command endpoint */
433 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
434 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
437 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
443 * Note that suspend is not supported starting with IPA v4.0, and
444 * delay mode should not be used starting with IPA v4.2.
449 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
457 if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
458 WARN_ON(ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
460 WARN_ON(ipa->version >= IPA_VERSION_4_0); in ipa_endpoint_init_ctrl()
462 reg = ipa_reg(ipa, ENDP_INIT_CTRL); in ipa_endpoint_init_ctrl()
463 offset = reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
464 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
466 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND; in ipa_endpoint_init_ctrl()
474 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
484 /* Delay mode should not be used for IPA v4.2+ */ in ipa_endpoint_program_delay()
485 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_program_delay()
486 WARN_ON(!endpoint->toward_ipa); in ipa_endpoint_program_delay()
493 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_aggr_active()
494 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
499 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_aggr_active()
501 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); in ipa_endpoint_aggr_active()
502 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_aggr_active()
509 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_force_close()
511 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
515 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_force_close()
517 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); in ipa_endpoint_force_close()
518 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_force_close()
522 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
525 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
527 * issue in IPA version 3.5.1 where the suspend interrupt will not be
532 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
534 if (!endpoint->config.aggregation) in ipa_endpoint_suspend_aggr()
544 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
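Only two calls of the IPA v3.5.1 workaround are matched above; a sketch of the whole flow, pieced together from those calls (an outline under those assumptions, not necessarily the exact function body), looks like this:

static void example_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	/* Nothing to do unless aggregation is in use and a frame is open */
	if (!endpoint->config.aggregation)
		return;
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force the open aggregation frame closed... */
	ipa_endpoint_force_close(endpoint);

	/* ...and raise the SUSPEND interrupt the hardware failed to send */
	ipa_interrupt_simulate_suspend(ipa->interrupt);
}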
547 /* Returns previous suspend state (true means suspend was enabled) */
553 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
554 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
556 WARN_ON(endpoint->toward_ipa); in ipa_endpoint_program_suspend()
561 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
571 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
572 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
575 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
579 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_pause_all()
580 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_pause_all()
582 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
585 if (!endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
587 else if (ipa->version < IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
590 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
591 endpoint->channel_id, in ipa_endpoint_modem_pause_all()
597 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
606 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); in ipa_endpoint_modem_exception_reset_all()
607 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
609 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
611 return -EBUSY; in ipa_endpoint_modem_exception_reset_all()
614 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_modem_exception_reset_all()
620 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
621 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
624 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_modem_exception_reset_all()
638 ipa_cmd_pipeline_clear_wait(ipa); in ipa_endpoint_modem_exception_reset_all()
645 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_cfg()
646 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_cfg() local
647 enum ipa_cs_offload_en enabled; in ipa_endpoint_init_cfg() local
651 reg = ipa_reg(ipa, ENDP_INIT_CFG); in ipa_endpoint_init_cfg()
653 if (endpoint->config.checksum) { in ipa_endpoint_init_cfg()
654 enum ipa_version version = ipa->version; in ipa_endpoint_init_cfg()
656 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
659 /* Checksum header offset is in 4-byte units */ in ipa_endpoint_init_cfg()
663 enabled = version < IPA_VERSION_4_5 in ipa_endpoint_init_cfg()
667 enabled = version < IPA_VERSION_4_5 in ipa_endpoint_init_cfg()
672 enabled = IPA_CS_OFFLOAD_NONE; in ipa_endpoint_init_cfg()
674 val |= reg_encode(reg, CS_OFFLOAD_EN, enabled); in ipa_endpoint_init_cfg()
677 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_cfg()
682 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_nat()
683 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_nat() local
687 if (!endpoint->toward_ipa) in ipa_endpoint_init_nat()
690 reg = ipa_reg(ipa, ENDP_INIT_NAT); in ipa_endpoint_init_nat()
693 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_nat()
702 if (!endpoint->config.checksum) in ipa_qmap_header_size()
707 if (endpoint->toward_ipa) in ipa_qmap_header_size()
724 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_header_size_encode()
731 /* IPA v4.5 adds a few more most-significant bits */ in ipa_header_size_encode()
746 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_metadata_offset_encode()
753 /* IPA v4.5 adds a few more most-significant bits */ in ipa_metadata_offset_encode()
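Both encode helpers above split a value across a base field and, for IPA v4.5+, a companion *_MSB field; a worked example may help (the 6-bit field width is an assumption for illustration):

/* Example: header_size = 70 (0x46), HDR_LEN field 6 bits wide (field_max = 0x3f)
 *   HDR_LEN     = 70 & 0x3f = 0x06    (low-order bits, all versions)
 *   HDR_LEN_MSB = 70 >> 6   = 0x01    (extra high-order bits, IPA v4.5+ only)
 * Prior to IPA v4.5 there is no MSB field, so a value larger than
 * field_max cannot be represented.
 */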
762 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
766 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
767 * packet size field, and we have the IPA hardware populate both for each
774 * The mux_id comes from a 4-byte metadata value supplied with each packet
776 * value that we want, in its low-order byte. A bitmask defined in the
784 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr()
785 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr() local
789 reg = ipa_reg(ipa, ENDP_INIT_HDR); in ipa_endpoint_init_hdr()
790 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr()
791 enum ipa_version version = ipa->version; in ipa_endpoint_init_hdr()
798 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
801 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
805 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
807 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ in ipa_endpoint_init_hdr()
823 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr()
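For reference, the QMAP header that the ipa_endpoint_init_hdr() kernel-doc above describes is the rmnet MAP header; in recent kernels its layout (see <linux/if_rmnet.h>) is roughly:

struct rmnet_map_header {
	u8 flags;		/* command/data flag, pad length */
	u8 mux_id;		/* the 1-byte mux_id the IPA writes on RX */
	__be16 pkt_len;		/* the 2-byte packet length, including padding */
};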
828 u32 pad_align = endpoint->config.rx.pad_align; in ipa_endpoint_init_hdr_ext()
829 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_ext()
830 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext() local
834 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT); in ipa_endpoint_init_hdr_ext()
835 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr_ext()
846 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
855 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
858 /* IPA v4.5 adds some most-significant bits to a few fields, in ipa_endpoint_init_hdr_ext()
861 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_init_hdr_ext()
863 if (endpoint->config.qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
875 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr_ext()
880 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
881 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_metadata_mask() local
886 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
887 return; /* Register not valid for TX endpoints */ in ipa_endpoint_init_hdr_metadata_mask()
889 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); in ipa_endpoint_init_hdr_metadata_mask()
893 if (endpoint->config.qmap) in ipa_endpoint_init_hdr_metadata_mask()
896 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
901 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_mode() local
906 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
907 return; /* Register not valid for RX endpoints */ in ipa_endpoint_init_mode()
909 reg = ipa_reg(ipa, ENDP_INIT_MODE); in ipa_endpoint_init_mode()
910 if (endpoint->config.dma_mode) { in ipa_endpoint_init_mode()
911 enum ipa_endpoint_name name = endpoint->config.dma_endpoint; in ipa_endpoint_init_mode()
912 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
921 offset = reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_mode()
922 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_mode()
925 /* For IPA v4.5+, times are expressed using Qtime. A time is represented
927 * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
931 * available to the AP; a third is available starting with IPA v5.0.
937 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select) in ipa_qtime_val() argument
953 if (ipa->version >= IPA_VERSION_5_0) { in ipa_qtime_val()
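A worked example of the granularity selection done by ipa_qtime_val() (the 5-bit field width is an assumption for illustration; the 100 us, 1 ms and 10 ms tick periods are the ones set up for pulse generators 0, 1 and 2 by ipa_qtime_config()):

/* Assume the target field holds at most max = 31 ticks:
 *   500 us  ->   5 ticks of 100 us   (fits; select pulse generator 0)
 *   10 ms   -> 100 ticks of 100 us   (too big)
 *           ->  10 ticks of 1 ms     (fits; select pulse generator 1)
 *   300 ms  -> 300 ticks of 1 ms     (too big)
 *           ->  30 ticks of 10 ms    (fits; IPA v5.0+ only, generator 2)
 * The function returns the tick count and reports the chosen generator
 * through *select.
 */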
965 /* Encode the aggregation timer limit (microseconds) based on IPA version */
966 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg, in aggr_time_limit_encode() argument
976 if (ipa->version >= IPA_VERSION_4_5) { in aggr_time_limit_encode()
979 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in aggr_time_limit_encode()
995 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_aggr()
996 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_aggr() local
1000 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_init_aggr()
1001 if (endpoint->config.aggregation) { in ipa_endpoint_init_aggr()
1002 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
1007 rx_config = &endpoint->config.rx; in ipa_endpoint_init_aggr()
1011 buffer_size = rx_config->buffer_size; in ipa_endpoint_init_aggr()
1012 limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_init_aggr()
1013 rx_config->aggr_hard_limit); in ipa_endpoint_init_aggr()
1016 limit = rx_config->aggr_time_limit; in ipa_endpoint_init_aggr()
1017 val |= aggr_time_limit_encode(ipa, reg, limit); in ipa_endpoint_init_aggr()
1021 if (rx_config->aggr_close_eof) in ipa_endpoint_init_aggr()
1029 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ in ipa_endpoint_init_aggr()
1035 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_aggr()
1038 /* The head-of-line blocking timer is defined as a tick count. For
1039  * IPA v4.5 and later the tick count is based on the Qtimer, which is
1040 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
1041 * each tick represents 128 cycles of the IPA core clock.
1046 static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg, in hol_block_timer_encode() argument
1059 if (ipa->version >= IPA_VERSION_4_5) { in hol_block_timer_encode()
1064 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in hol_block_timer_encode()
1071 rate = ipa_core_clock_rate(ipa); in hol_block_timer_encode()
1077 /* IPA v3.5.1 through v4.1 just record the tick count */ in hol_block_timer_encode()
1078 if (ipa->version < IPA_VERSION_4_2) in hol_block_timer_encode()
1081 /* For IPA v4.2, the tick count is represented by base and in hol_block_timer_encode()
1082 * scale fields within the 32-bit timer register, where: in hol_block_timer_encode()
1091 scale = high > width ? high - width : 0; in hol_block_timer_encode()
1094 ticks += 1 << (scale - 1); in hol_block_timer_encode()
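A worked example of the tick arithmetic above (the 100 MHz core clock rate and the 5-bit base field width are assumptions for illustration):

/* microseconds = 256, IPA core clock = 100 MHz, 128 clock cycles per tick:
 *   ticks = 256 us * 100,000,000 Hz / (128 * 1,000,000) = 200
 *
 * IPA v3.5.1 through v4.1: the register simply holds 200.
 *
 * IPA v4.2 (base << scale encoding, assuming a 5-bit base field):
 *   highest set bit of 200 is bit 7, so high = 8, width = 5, scale = 3
 *   rounding: ticks += 1 << (3 - 1)  ->  204
 *   base = 204 >> 3 = 25, and the timer represents 25 << 3 = 200 ticks
 */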
1110 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
1111 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
1116 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); in ipa_endpoint_init_hol_block_timer()
1117 val = hol_block_timer_encode(ipa, reg, microseconds); in ipa_endpoint_init_hol_block_timer()
1119 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hol_block_timer()
1125 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_en()
1126 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_en() local
1131 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); in ipa_endpoint_init_hol_block_en()
1135 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1137 /* When enabling, the register must be written twice for IPA v4.5+ */ in ipa_endpoint_init_hol_block_en()
1138 if (enable && ipa->version >= IPA_VERSION_4_5) in ipa_endpoint_init_hol_block_en()
1139 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1155 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
1159 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_hol_block_clear_all()
1160 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_hol_block_clear_all()
1162 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
1172 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_deaggr()
1173 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_deaggr() local
1177 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
1178 return; /* Register not valid for RX endpoints */ in ipa_endpoint_init_deaggr()
1180 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR); in ipa_endpoint_init_deaggr()
1183 /* PACKET_OFFSET_LOCATION is ignored (not valid) */ in ipa_endpoint_init_deaggr()
1186 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_deaggr()
1191 u32 resource_group = endpoint->config.resource_group; in ipa_endpoint_init_rsrc_grp()
1192 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_rsrc_grp()
1193 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp() local
1197 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); in ipa_endpoint_init_rsrc_grp()
1200 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_rsrc_grp()
1205 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_seq()
1206 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_seq() local
1210 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
1211 return; /* Register not valid for RX endpoints */ in ipa_endpoint_init_seq()
1213 reg = ipa_reg(ipa, ENDP_INIT_SEQ); in ipa_endpoint_init_seq()
1215 /* Low-order byte configures primary packet processing */ in ipa_endpoint_init_seq()
1216 val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type); in ipa_endpoint_init_seq()
1219 if (ipa->version < IPA_VERSION_4_5) in ipa_endpoint_init_seq()
1221 endpoint->config.tx.seq_rep_type); in ipa_endpoint_init_seq()
1223 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_seq()
1227 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1243 nr_frags = skb_shinfo(skb)->nr_frags; in ipa_endpoint_skb_tx()
1244 if (nr_frags > endpoint->skb_frag_max) { in ipa_endpoint_skb_tx()
1246 return -E2BIG; in ipa_endpoint_skb_tx()
1252 return -EBUSY; in ipa_endpoint_skb_tx()
1257 trans->data = skb; /* transaction owns skb now */ in ipa_endpoint_skb_tx()
1266 return -ENOMEM; in ipa_endpoint_skb_tx()
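A minimal caller sketch may help show how ipa_endpoint_skb_tx() is used; the private-data layout and the drop-on-error policy are illustrative assumptions (the real network device code also handles power management, queue stopping, and retrying on -EBUSY):

struct example_priv {
	struct ipa_endpoint *tx_endpoint;	/* hypothetical private data */
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct example_priv *priv = netdev_priv(netdev);

	/* Returns 0 on success, or -E2BIG, -EBUSY or -ENOMEM on failure */
	if (ipa_endpoint_skb_tx(priv->tx_endpoint, skb)) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_dropped++;
	}

	return NETDEV_TX_OK;
}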
1271 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
1272 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
1276 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_status()
1277 if (endpoint->config.status_enable) { in ipa_endpoint_status()
1279 if (endpoint->toward_ipa) { in ipa_endpoint_status()
1283 name = endpoint->config.tx.status_endpoint; in ipa_endpoint_status()
1284 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
1288 /* STATUS_LOCATION is 0, meaning IPA packet status in ipa_endpoint_status()
1289 * precedes the packet (not present for IPA v4.5+) in ipa_endpoint_status()
1294 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_status()
1306 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_replenish_one()
1309 return -ENOMEM; in ipa_endpoint_replenish_one()
1313 len = buffer_size - offset; in ipa_endpoint_replenish_one()
1319 trans->data = page; /* transaction owns page now */ in ipa_endpoint_replenish_one()
1325 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1328 * The IPA hardware can hold a fixed number of receive buffers for an RX
1330 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1339 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1343 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1354 doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH); in ipa_endpoint_replenish()
1358 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1364 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1372 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1373 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
1379 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_enable()
1382 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
1388 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_disable()
1406 if (!endpoint->netdev) in ipa_endpoint_skb_copy()
1413 memcpy(skb->data, data, len); in ipa_endpoint_skb_copy()
1414 skb->truesize += extra; in ipa_endpoint_skb_copy()
1417 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
1423 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_skb_build()
1427 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1430 WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD)); in ipa_endpoint_skb_build()
1440 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1445 /* The format of an IPA packet status structure is the same for several
1464 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_skip() local
1468 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE); in ipa_endpoint_status_skip()
1472 endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT); in ipa_endpoint_status_skip()
1473 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1484 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag_valid() local
1487 status_mask = ipa_status_extract(ipa, data, STATUS_MASK); in ipa_endpoint_status_tag_valid()
1489 return false; /* No valid tag */ in ipa_endpoint_status_tag_valid()
1491 /* The status contains a valid tag. We know the packet was sent to in ipa_endpoint_status_tag_valid()
1493 * If the packet came from the AP->command TX endpoint we know in ipa_endpoint_status_tag_valid()
1496 endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT); in ipa_endpoint_status_tag_valid()
1497 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; in ipa_endpoint_status_tag_valid()
1498 if (endpoint_id == command_endpoint->endpoint_id) { in ipa_endpoint_status_tag_valid()
1499 complete(&ipa->completion); in ipa_endpoint_status_tag_valid()
1501 dev_err(&ipa->pdev->dev, in ipa_endpoint_status_tag_valid()
1514 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_drop() local
1522 exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION); in ipa_endpoint_status_drop()
1527 rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX); in ipa_endpoint_status_drop()
1535 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_status_parse()
1537 u32 unused = buffer_size - total_len; in ipa_endpoint_status_parse()
1538 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_parse() local
1547 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1554 length = ipa_status_extract(ipa, data, STATUS_LENGTH); in ipa_endpoint_status_parse()
1557 resid -= IPA_STATUS_SIZE; in ipa_endpoint_status_parse()
1564 * And if checksum offload is enabled a trailer containing in ipa_endpoint_status_parse()
1567 align = endpoint->config.rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1569 if (endpoint->config.checksum) in ipa_endpoint_status_parse()
1590 resid -= len; in ipa_endpoint_status_parse()
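A worked example of the per-packet accounting above (the 32-byte status structure size and the 8-byte checksum trailer size are stated here as assumptions):

/* STATUS_LENGTH = 1421, pad_align = 8, checksum offload enabled:
 *   len = 32 (status) + ALIGN(1421, 8) + 8 (checksum trailer)
 *       = 32 + 1424 + 8 = 1464
 * so 1464 bytes of the receive buffer are consumed before the next
 * status structure, and resid is reduced by the same amount.
 */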
1599 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1602 if (trans->cancelled) in ipa_endpoint_trans_complete()
1606 page = trans->data; in ipa_endpoint_trans_complete()
1607 if (endpoint->config.status_enable) in ipa_endpoint_trans_complete()
1608 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_trans_complete()
1609 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_trans_complete()
1610 trans->data = NULL; /* Pages have been consumed */ in ipa_endpoint_trans_complete()
1618 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1619 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1622 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1623 struct sk_buff *skb = trans->data; in ipa_endpoint_trans_release()
1629 struct page *page = trans->data; in ipa_endpoint_trans_release()
1636 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1641 reg = ipa_reg(ipa, ROUTE); in ipa_endpoint_default_route_set()
1649 iowrite32(val, ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_default_route_set()
1652 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1654 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1658 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1663 * taken to ensure the IPA pipeline is properly cleared.
1669 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1670 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1671 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1681 return -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1685 ret = -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1694 * active. We'll re-enable the doorbell (if appropriate) when in ipa_endpoint_reset_rx_aggr()
1697 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1703 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1707 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1717 } while (retries--); in ipa_endpoint_reset_rx_aggr()
1722 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1724 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1726 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1730 /* Finally, reset and reconfigure the channel again (re-enabling in ipa_endpoint_reset_rx_aggr()
1735 gsi_channel_reset(gsi, endpoint->channel_id, true); in ipa_endpoint_reset_rx_aggr()
1742 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1755 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1756 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1760 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1764 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1765 endpoint->config.aggregation; in ipa_endpoint_reset()
1769 gsi_channel_reset(&ipa->gsi, channel_id, true); in ipa_endpoint_reset()
1772 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
1774 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1779 if (endpoint->toward_ipa) { in ipa_endpoint_program()
1780 /* Newer versions of IPA use GSI channel flow control in ipa_endpoint_program()
1782 * Flow control is disabled for newly-allocated channels, in ipa_endpoint_program()
1783 * and we can assume flow control is not (ever) enabled in ipa_endpoint_program()
1786 if (endpoint->ipa->version < IPA_VERSION_4_2) in ipa_endpoint_program()
1799 if (!endpoint->toward_ipa) { in ipa_endpoint_program()
1800 if (endpoint->config.rx.holb_drop) in ipa_endpoint_program()
1813 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_enable_one()
1814 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1815 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1818 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1820 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1822 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1823 endpoint->channel_id, endpoint_id); in ipa_endpoint_enable_one()
1827 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1828 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id); in ipa_endpoint_enable_one()
1832 __set_bit(endpoint_id, ipa->enabled); in ipa_endpoint_enable_one()
1839 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_disable_one()
1840 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1841 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1844 if (!test_bit(endpoint_id, ipa->enabled)) in ipa_endpoint_disable_one()
1847 __clear_bit(endpoint_id, endpoint->ipa->enabled); in ipa_endpoint_disable_one()
1849 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1851 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id); in ipa_endpoint_disable_one()
1854 /* Note that if stop fails, the channel's state is not well-defined */ in ipa_endpoint_disable_one()
1855 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1857 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1864 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1865 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1868 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_suspend_one()
1871 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1876 ret = gsi_channel_suspend(gsi, endpoint->channel_id); in ipa_endpoint_suspend_one()
1879 endpoint->channel_id); in ipa_endpoint_suspend_one()
1884 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1885 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1888 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_resume_one()
1891 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1894 ret = gsi_channel_resume(gsi, endpoint->channel_id); in ipa_endpoint_resume_one()
1897 endpoint->channel_id); in ipa_endpoint_resume_one()
1898 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1902 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1904 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1907 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1908 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1910 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1911 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1914 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1916 if (!ipa->setup_complete) in ipa_endpoint_resume()
1919 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1920 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1922 if (ipa->modem_netdev) in ipa_endpoint_resume()
1923 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1928 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1929 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1932 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1935 endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1; in ipa_endpoint_setup_one()
1936 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1940 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1941 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1942 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1948 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_setup_one()
1953 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_teardown_one()
1955 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1956 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1961 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1965 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_setup()
1966 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1969 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1973 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count) in ipa_endpoint_teardown()
1974 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1977 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1979 ipa->available_count = 0; in ipa_endpoint_deconfig()
1980 bitmap_free(ipa->available); in ipa_endpoint_deconfig()
1981 ipa->available = NULL; in ipa_endpoint_deconfig()
1984 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1986 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1996 /* Prior to IPA v3.5, the FLAVOR_0 register was not supported. in ipa_endpoint_config()
2004 * assume the configuration is valid. in ipa_endpoint_config()
2006 if (ipa->version < IPA_VERSION_3_5) { in ipa_endpoint_config()
2007 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL); in ipa_endpoint_config()
2008 if (!ipa->available) in ipa_endpoint_config()
2009 return -ENOMEM; in ipa_endpoint_config()
2010 ipa->available_count = IPA_ENDPOINT_MAX; in ipa_endpoint_config()
2012 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX); in ipa_endpoint_config()
2020 reg = ipa_reg(ipa, FLAVOR_0); in ipa_endpoint_config()
2021 val = ioread32(ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_config()
2023 /* Our RX is an IPA producer; our TX is an IPA consumer. */ in ipa_endpoint_config()
2032 return -EINVAL; in ipa_endpoint_config()
2035  	/* Until IPA v5.0, the hardware supported at most 32 endpoints */ in ipa_endpoint_config()
2036 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1; in ipa_endpoint_config()
2040 return -EINVAL; in ipa_endpoint_config()
2044 ipa->available = bitmap_zalloc(limit, GFP_KERNEL); in ipa_endpoint_config()
2045 if (!ipa->available) in ipa_endpoint_config()
2046 return -ENOMEM; in ipa_endpoint_config()
2047 ipa->available_count = limit; in ipa_endpoint_config()
2050 bitmap_set(ipa->available, 0, tx_count); in ipa_endpoint_config()
2051 bitmap_set(ipa->available, rx_base, rx_count); in ipa_endpoint_config()
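A worked example of the availability bitmap setup (the FLAVOR_0 values below are hypothetical):

/* Suppose FLAVOR_0 reports tx_count = 11 (consumer pipes), rx_count = 10
 * (producer pipes) and rx_base = 13:
 *   limit = 13 + 10 = 23 endpoints
 *   bitmap_set(available, 0, 11)   ->  TX endpoint IDs  0..10
 *   bitmap_set(available, 13, 10)  ->  RX endpoint IDs 13..22
 * IDs 11 and 12 fall in neither range, so an endpoint defined there would
 * fail the "not available" check below.
 */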
2053 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_config()
2058 endpoint_id, limit - 1); in ipa_endpoint_config()
2062 if (!test_bit(endpoint_id, ipa->available)) { in ipa_endpoint_config()
2069 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
2070 if (endpoint->toward_ipa) { in ipa_endpoint_config()
2084 ipa_endpoint_deconfig(ipa); in ipa_endpoint_config()
2086 return -EINVAL; in ipa_endpoint_config()
2089 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
2094 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
2096 if (data->ee_id == GSI_EE_AP) in ipa_endpoint_init_one()
2097 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
2098 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
2100 endpoint->ipa = ipa; in ipa_endpoint_init_one()
2101 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
2102 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
2103 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
2104 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
2105 endpoint->config = data->endpoint.config; in ipa_endpoint_init_one()
2107 __set_bit(endpoint->endpoint_id, ipa->defined); in ipa_endpoint_init_one()
2112 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined); in ipa_endpoint_exit_one()
2117 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
2121 ipa->filtered = 0; in ipa_endpoint_exit()
2123 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_exit()
2124 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
2126 bitmap_free(ipa->enabled); in ipa_endpoint_exit()
2127 ipa->enabled = NULL; in ipa_endpoint_exit()
2128 bitmap_free(ipa->set_up); in ipa_endpoint_exit()
2129 ipa->set_up = NULL; in ipa_endpoint_exit()
2130 bitmap_free(ipa->defined); in ipa_endpoint_exit()
2131 ipa->defined = NULL; in ipa_endpoint_exit()
2133 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
2134 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
2138 int ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
2147 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1; in ipa_endpoint_init()
2148 if (!ipa->endpoint_count) in ipa_endpoint_init()
2149 return -EINVAL; in ipa_endpoint_init()
2152 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2153 if (!ipa->defined) in ipa_endpoint_init()
2154 return -ENOMEM; in ipa_endpoint_init()
2156 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2157 if (!ipa->set_up) in ipa_endpoint_init()
2160 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2161 if (!ipa->enabled) in ipa_endpoint_init()
2169 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
2171 if (data->endpoint.filter_support) in ipa_endpoint_init()
2172 filtered |= BIT(data->endpoint_id); in ipa_endpoint_init()
2173 if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) in ipa_endpoint_init()
2174 ipa->modem_tx_count++; in ipa_endpoint_init()
2177 /* Make sure the set of filtered endpoints is valid */ in ipa_endpoint_init()
2178 if (!ipa_filtered_valid(ipa, filtered)) { in ipa_endpoint_init()
2179 ipa_endpoint_exit(ipa); in ipa_endpoint_init()
2181 return -EINVAL; in ipa_endpoint_init()
2184 ipa->filtered = filtered; in ipa_endpoint_init()
2189 bitmap_free(ipa->set_up); in ipa_endpoint_init()
2190 ipa->set_up = NULL; in ipa_endpoint_init()
2192 bitmap_free(ipa->defined); in ipa_endpoint_init()
2193 ipa->defined = NULL; in ipa_endpoint_init()
2195 return -ENOMEM; in ipa_endpoint_init()