// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2022 Intel Corporation
 */

#include "mvm.h"
#include "time-sync.h"
#include <linux/ieee80211.h>

void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data)
{
	skb_queue_head_init(&data->frame_list);
}

static bool iwl_mvm_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 skb_dialog_token;

	if (ieee80211_is_timing_measurement(skb))
		skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token;
	else
		skb_dialog_token = mgmt->u.action.u.ftm.dialog_token;

	if ((ether_addr_equal(mgmt->sa, addr) ||
	     ether_addr_equal(mgmt->da, addr)) &&
	    skb_dialog_token == dialog_token)
		return true;

	return false;
}

static struct sk_buff *iwl_mvm_time_sync_find_skb(struct iwl_mvm *mvm, u8 *addr,
						  u8 dialog_token)
{
	struct sk_buff *skb;

	/* The queue is expected to have only one SKB. If there are other SKBs
	 * in the queue, they did not get a time sync notification and are
	 * probably obsolete by now, so drop them.
	 */
	while ((skb = skb_dequeue(&mvm->time_sync.frame_list))) {
		if (iwl_mvm_is_skb_match(skb, addr, dialog_token))
			break;

		kfree_skb(skb);
		skb = NULL;
	}

	return skb;
}

static u64 iwl_mvm_get_64_bit(__le32 high, __le32 low)
{
	return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low);
}

void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm,
				  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_msmt_notify *notif = (void *)pkt->data;
	struct ieee80211_rx_status *rx_status;
	struct skb_shared_hwtstamps *shwt;
	u64 ts_10ns;
	struct sk_buff *skb =
		iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr,
					   le32_to_cpu(notif->dialog_token));
	u64 adj_time;

	if (!skb) {
		IWL_DEBUG_INFO(mvm, "Time sync event but no pending skb\n");
		return;
	}

	ts_10ns = iwl_mvm_get_64_bit(notif->t2_hi, notif->t2_lo);
	adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
	shwt = skb_hwtstamps(skb);
	shwt->hwtstamp = ktime_set(0, adj_time);

	ts_10ns = iwl_mvm_get_64_bit(notif->t3_hi, notif->t3_lo);
	adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
	rx_status = IEEE80211_SKB_RXCB(skb);
	rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time);

	IWL_DEBUG_INFO(mvm,
		       "Time sync: RX event - report frame t2=%llu t3=%llu\n",
		       ktime_to_ns(shwt->hwtstamp),
		       ktime_to_ns(rx_status->ack_tx_hwtstamp));
	ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}

void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm,
					  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data;
	struct ieee80211_tx_status status = {};
	struct skb_shared_hwtstamps *shwt;
	u64 ts_10ns, adj_time;

	status.skb =
		iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr,
					   le32_to_cpu(notif->dialog_token));

	if (!status.skb) {
		IWL_DEBUG_INFO(mvm, "Time sync confirm but no pending skb\n");
		return;
	}

	ts_10ns = iwl_mvm_get_64_bit(notif->t1_hi, notif->t1_lo);
	adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
	shwt = skb_hwtstamps(status.skb);
	shwt->hwtstamp = ktime_set(0, adj_time);

	ts_10ns = iwl_mvm_get_64_bit(notif->t4_hi, notif->t4_lo);
	adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
	status.info = IEEE80211_SKB_CB(status.skb);
	status.ack_hwtstamp = ktime_set(0, adj_time);

	IWL_DEBUG_INFO(mvm,
		       "Time sync: TX event - report frame t1=%llu t4=%llu\n",
		       ktime_to_ns(shwt->hwtstamp),
		       ktime_to_ns(status.ack_hwtstamp));
	ieee80211_tx_status_ext(mvm->hw, &status);
}

int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr, u32 protocols)
{
	struct iwl_time_sync_cfg_cmd cmd = {};
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
		return -EINVAL;

	/* The fw only supports one peer. We do allow reconfiguration of the
	 * same peer for cases of fw reset etc.
	 */
	if (mvm->time_sync.active &&
	    !ether_addr_equal(addr, mvm->time_sync.peer_addr)) {
		IWL_DEBUG_INFO(mvm, "Time sync: reject config for peer: %pM\n",
			       addr);
		return -ENOBUFS;
	}

	if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM |
			  IWL_TIME_SYNC_PROTOCOL_FTM))
		return -EINVAL;

	cmd.protocols = cpu_to_le32(protocols);

	ether_addr_copy(cmd.peer_addr, addr);

	err = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(DATA_PATH_GROUP,
					   WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
				   0, sizeof(cmd), &cmd);
	if (err) {
		IWL_ERR(mvm, "Failed to send time sync cfg cmd: %d\n", err);
	} else {
		mvm->time_sync.active = protocols != 0;
		ether_addr_copy(mvm->time_sync.peer_addr, addr);
		IWL_DEBUG_INFO(mvm, "Time sync: set peer addr=%pM\n", addr);
	}

	if (!mvm->time_sync.active)
		skb_queue_purge(&mvm->time_sync.frame_list);

	return err;
}
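
/*
 * Illustrative sketch only, kept as a comment so it is not compiled: one
 * plausible way a caller could enable time sync for a peer via the config
 * helper above. The wrapper name iwl_mvm_example_enable_time_sync() and its
 * caller context are assumptions for illustration; iwl_mvm_time_sync_config()
 * and the IWL_TIME_SYNC_PROTOCOL_* flags are the symbols used in this file,
 * and mvm->mutex must be held, as the helper asserts with lockdep.
 *
 *	static int iwl_mvm_example_enable_time_sync(struct iwl_mvm *mvm,
 *						    const u8 *peer_addr)
 *	{
 *		int ret;
 *
 *		mutex_lock(&mvm->mutex);
 *		ret = iwl_mvm_time_sync_config(mvm, peer_addr,
 *					       IWL_TIME_SYNC_PROTOCOL_TM |
 *					       IWL_TIME_SYNC_PROTOCOL_FTM);
 *		mutex_unlock(&mvm->mutex);
 *
 *		return ret;
 *	}
 */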