/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>

#include "mt76x02_mcu.h"

static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;
	memcpy(skb_put(skb, len), data, len);

	return skb;
}

static struct sk_buff *
mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
{
	unsigned long timeout;

	/* bail out if the deadline has already passed */
	if (!time_is_after_jiffies(expires))
		return NULL;

	timeout = expires - jiffies;
	wait_event_timeout(dev->mt76.mmio.mcu.wait,
			   !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
			   timeout);
	return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
}

static int
mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
		     struct sk_buff *skb, int cmd, int seq)
{
	struct mt76_queue *q = &dev->mt76.q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;
	u32 tx_info;

	/* FCE info word: command type, sequence number, port and length */
	tx_info = MT_MCU_MSG_TYPE_CMD |
		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);

	addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev->mt76.dev, addr))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_queue_kick(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
			 int len, bool wait_resp)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	unsigned long expires = jiffies + HZ;
	struct sk_buff *skb;
	int ret;
	u8 seq;

	skb = mt76x02_mcu_msg_alloc(data, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mdev->mmio.mcu.mutex);

	/* the sequence number is 4 bits wide; skip 0 when it wraps */
	seq = ++mdev->mmio.mcu.msg_seq & 0xf;
	if (!seq)
		seq = ++mdev->mmio.mcu.msg_seq & 0xf;

	ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
	if (ret)
		goto out;

	/* drop stale responses until the matching sequence number shows up */
	while (wait_resp) {
		u32 *rxfce;
		bool check_seq = false;

		skb = mt76x02_mcu_get_response(dev, expires);
		if (!skb) {
			dev_err(mdev->dev,
				"MCU message %d (seq %d) timed out\n", cmd,
				seq);
			ret = -ETIMEDOUT;
			break;
		}

		rxfce = (u32 *) skb->cb;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
			check_seq = true;

		dev_kfree_skb(skb);
		if (check_seq)
			break;
	}

out:
	mutex_unlock(&mdev->mmio.mcu.mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);

int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
				u32 val)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};
	bool wait = false;

	/* Q_SELECT is fire-and-forget; other functions wait for a response */
	if (func != Q_SELECT)
		wait = true;

	return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);

int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on)
{
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};

	return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg),
				 false);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);

int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(param),
	};
	bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev);
	int ret;

	/* on mt76x2e, clear the handshake bit before starting calibration */
	if (is_mt76x2e)
		mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);

	ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
				true);
	if (ret)
		return ret;

	/* the MCU sets BIT(31) in MT_MCU_COM_REG0 once calibration is done */
	if (is_mt76x2e &&
	    WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
				    BIT(31), BIT(31), 100)))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);

int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
{
	struct sk_buff *skb;

	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
	usleep_range(20000, 30000);

	/* drain any responses still sitting in the queue */
	while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);

void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
			       const struct mt76x02_fw_header *h)
{
	u16 bld = le16_to_cpu(h->build_ver);
	u16 ver = le16_to_cpu(h->fw_ver);

	snprintf(dev->mt76.hw->wiphy->fw_version,
		 sizeof(dev->mt76.hw->wiphy->fw_version),
		 "%d.%d.%02d-b%x",
		 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
}
EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
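
/*
 * Usage sketch (illustrative only, not part of the upstream driver): a
 * typical bring-up path first issues Q_SELECT (which, per the logic in
 * mt76x02_mcu_function_select() above, does not wait for a response) and
 * then powers the radio on. The helper below is a hypothetical composition
 * of the exported functions in this file, following the kernel's usual
 * early-return error handling style.
 */
static int __maybe_unused mt76x02_mcu_init_sketch(struct mt76x02_dev *dev)
{
	int err;

	err = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
	if (err)
		return err;

	return mt76x02_mcu_set_radio_state(dev, true);
}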