/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>

#include "mt76x02_mcu.h"

/* Copy an MCU command payload into a freshly allocated skb. */
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;
	memcpy(skb_put(skb, len), data, len);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);

/*
 * Wait (until @expires) for the RX path to queue an MCU response,
 * then hand back the first queued skb, or NULL on timeout.
 */
static struct sk_buff *
mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
{
	unsigned long timeout;

	if (!time_is_after_jiffies(expires))
		return NULL;

	timeout = expires - jiffies;
	wait_event_timeout(dev->mt76.mmio.mcu.wait,
			   !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
			   timeout);
	return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
}

static int
mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
		     struct sk_buff *skb, int cmd, int seq)
{
	struct mt76_queue *q = &dev->mt76.q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;
	u32 tx_info;

	/* Build the descriptor info word: command type, sequence number,
	 * destination port and payload length.
	 */
	tx_info = MT_MCU_MSG_TYPE_CMD |
		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);

	addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev->mt76.dev, addr))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_queue_kick(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
			 int cmd, bool wait_resp)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	unsigned long expires = jiffies + HZ;
	int ret;
	u8 seq;

	if (!skb)
		return -EINVAL;

	mutex_lock(&mdev->mmio.mcu.mutex);

	/* Sequence numbers are 4 bits wide; 0 is reserved, so skip it
	 * when the counter wraps.
	 */
	seq = ++mdev->mmio.mcu.msg_seq & 0xf;
	if (!seq)
		seq = ++mdev->mmio.mcu.msg_seq & 0xf;

	ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
	if (ret)
		goto out;

	/* Drain responses until the one matching our sequence number
	 * shows up; stale replies to earlier commands are discarded.
	 */
	while (wait_resp) {
		u32 *rxfce;
		bool check_seq = false;

		skb = mt76x02_mcu_get_response(dev, expires);
		if (!skb) {
			dev_err(mdev->dev,
				"MCU message %d (seq %d) timed out\n", cmd,
				seq);
			ret = -ETIMEDOUT;
			break;
		}

		rxfce = (u32 *)skb->cb;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
			check_seq = true;

		dev_kfree_skb(skb);
		if (check_seq)
			break;
	}

out:
	mutex_unlock(&mdev->mmio.mcu.mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);

int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
				enum mcu_function func,
				u32 val, bool wait_resp)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);

int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
				bool wait_resp)
{
	struct sk_buff *skb;
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);

int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
			  u32 param, bool wait)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(param),
	};
	int ret;

	/* Clear the calibration-done flag before issuing the command;
	 * the MCU sets it back once calibration has finished.
	 */
	if (wait)
		mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
	if (ret)
		return ret;

	if (wait &&
	    WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
				    BIT(31), BIT(31), 100)))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);

int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
{
	struct sk_buff *skb;

	/* Reset the MCU, give it time to come back up, then drop any
	 * responses that are still queued.
	 */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
	usleep_range(20000, 30000);

	while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);

void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
			       const struct mt76x02_fw_header *h)
{
	u16 bld = le16_to_cpu(h->build_ver);
	u16 ver = le16_to_cpu(h->fw_ver);

	/* fw_ver packs major/minor/patch into three nibbles. */
	snprintf(dev->mt76.hw->wiphy->fw_version,
		 sizeof(dev->mt76.hw->wiphy->fw_version),
		 "%d.%d.%02d-b%x",
		 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
}
EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
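/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * chip init path might drive the exported helpers above. Assumptions:
 * Q_SELECT is taken from enum mcu_function and MCU_CAL_R from the
 * chip-specific calibration enums; the real call sites live in the
 * mt76x0/mt76x2 init code, which should be treated as authoritative.
 */
static int __maybe_unused example_mcu_bringup(struct mt76x02_dev *dev)
{
	int ret;

	/* Hypothetical function-select call, mirroring the pattern
	 * used after firmware load.
	 */
	ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
	if (ret)
		return ret;

	/* Power the radio on and wait for the MCU acknowledgment. */
	ret = mt76x02_mcu_set_radio_state(dev, true, true);
	if (ret)
		return ret;

	/* Run one calibration step, blocking on the MT_MCU_COM_REG0
	 * BIT(31) handshake implemented in mt76x02_mcu_calibrate().
	 */
	return mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
}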