1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
4 *
5 * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
6 */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/gpio/consumer.h>
10 #include <linux/spi/spi.h>
11 #include <linux/workqueue.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/skbuff.h>
15 #include <linux/of_gpio.h>
16 #include <linux/regmap.h>
17 #include <linux/ieee802154.h>
18 #include <linux/debugfs.h>
19
20 #include <net/mac802154.h>
21 #include <net/cfg802154.h>
22
23 #include <linux/device.h>
24
25 #include "mcr20a.h"
26
27 #define SPI_COMMAND_BUFFER 3
28
29 #define REGISTER_READ BIT(7)
30 #define REGISTER_WRITE (0 << 7)
31 #define REGISTER_ACCESS (0 << 6)
32 #define PACKET_BUFF_BURST_ACCESS BIT(6)
33 #define PACKET_BUFF_BYTE_ACCESS BIT(5)
34
35 #define MCR20A_WRITE_REG(x) (x)
36 #define MCR20A_READ_REG(x) (REGISTER_READ | (x))
37 #define MCR20A_BURST_READ_PACKET_BUF (0xC0)
38 #define MCR20A_BURST_WRITE_PACKET_BUF (0x40)
39
40 #define MCR20A_CMD_REG 0x80
41 #define MCR20A_CMD_REG_MASK 0x3f
42 #define MCR20A_CMD_WRITE 0x40
43 #define MCR20A_CMD_FB 0x20
44
45 /* Number of Interrupt Request Status Register */
46 #define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */
47
48 /* MCR20A CCA Type */
/* MCR20A CCA type, programmed into the CCATYPE field of DAR_PHY_CTRL4 */
enum {
	MCR20A_CCA_ED,	  /* energy detect - CCA bit not active,
			   * not to be used for T and CCCA sequences
			   */
	MCR20A_CCA_MODE1, /* energy detect - CCA bit ACTIVE */
	MCR20A_CCA_MODE2, /* 802.15.4 compliant signal detect - CCA bit ACTIVE */
	MCR20A_CCA_MODE3  /* modes 1 and 2 combined (AND/OR via IAR_CCA_CTRL) */
};
56
/* Transceiver sequence selector, written to the XCVSEQ field of
 * DAR_PHY_CTRL1 to start (or, with IDLE, abort) a hardware sequence.
 */
enum {
	MCR20A_XCVSEQ_IDLE	= 0x00,	/* no sequence running */
	MCR20A_XCVSEQ_RX	= 0x01,	/* receive */
	MCR20A_XCVSEQ_TX	= 0x02,	/* transmit */
	MCR20A_XCVSEQ_CCA	= 0x03,	/* clear channel assessment */
	MCR20A_XCVSEQ_TR	= 0x04,	/* combined TX/RX sequence */
	MCR20A_XCVSEQ_CCCA	= 0x05,	/* continuous CCA */
};
65
66 /* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
67 #define MCR20A_MIN_CHANNEL (11)
68 #define MCR20A_MAX_CHANNEL (26)
69 #define MCR20A_CHANNEL_SPACING (5)
70
71 /* MCR20A CCA Threshold constans */
72 #define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
73 #define MCR20A_MAX_CCA_THRESHOLD (0x00U)
74
75 /* version 0C */
76 #define MCR20A_OVERWRITE_VERSION (0x0C)
77
78 /* MCR20A PLL configurations */
/* PLL integer part, indexed by (channel - 11); one entry per 5 MHz
 * channel from 2405 to 2480 MHz.
 */
static const u8 PLL_INT[16] = {
	/* 2405 */ 0x0B,	/* 2410 */ 0x0B,	/* 2415 */ 0x0B,
	/* 2420 */ 0x0B,	/* 2425 */ 0x0B,	/* 2430 */ 0x0B,
	/* 2435 */ 0x0C,	/* 2440 */ 0x0C,	/* 2445 */ 0x0C,
	/* 2450 */ 0x0C,	/* 2455 */ 0x0C,	/* 2460 */ 0x0C,
	/* 2465 */ 0x0D,	/* 2470 */ 0x0D,	/* 2475 */ 0x0D,
	/* 2480 */ 0x0D
};
87
/* PLL fractional part (MSB of the 16-bit fraction), indexed by
 * (channel - 11); matches PLL_INT above.
 */
static const u8 PLL_FRAC[16] = {
	/* 2405 */ 0x28,	/* 2410 */ 0x50,	/* 2415 */ 0x78,
	/* 2420 */ 0xA0,	/* 2425 */ 0xC8,	/* 2430 */ 0xF0,
	/* 2435 */ 0x18,	/* 2440 */ 0x40,	/* 2445 */ 0x68,
	/* 2450 */ 0x90,	/* 2455 */ 0xB8,	/* 2460 */ 0xE0,
	/* 2465 */ 0x08,	/* 2470 */ 0x30,	/* 2475 */ 0x58,
	/* 2480 */ 0x80
};
96
/* Vendor-recommended IAR register overwrites applied during PHY init
 * (see mcr20a_phy_init / MCR20A_OVERWRITE_VERSION).
 * NOTE(review): "mar20a" looks like a typo for "mcr20a", but the name
 * is kept as-is since it is referenced elsewhere in this file.
 */
static const struct reg_sequence mar20a_iar_overwrites[] = {
	{ IAR_MISC_PAD_CTRL,	0x02 },
	{ IAR_VCO_CTRL1,	0xB3 },
	{ IAR_VCO_CTRL2,	0x07 },
	{ IAR_PA_TUNING,	0x71 },
	{ IAR_CHF_IBUF,		0x2F },
	{ IAR_CHF_QBUF,		0x2F },
	{ IAR_CHF_IRIN,		0x24 },
	{ IAR_CHF_QRIN,		0x24 },
	{ IAR_CHF_IL,		0x24 },
	{ IAR_CHF_QL,		0x24 },
	{ IAR_CHF_CC1,		0x32 },
	{ IAR_CHF_CCL,		0x1D },
	{ IAR_CHF_CC2,		0x2D },
	{ IAR_CHF_IROUT,	0x24 },
	{ IAR_CHF_QROUT,	0x24 },
	{ IAR_PA_CAL,		0x28 },
	{ IAR_AGC_THR1,		0x55 },
	{ IAR_AGC_THR2,		0x2D },
	{ IAR_ATT_RSSI1,	0x5F },
	{ IAR_ATT_RSSI2,	0x8F },
	{ IAR_RSSI_OFFSET,	0x61 },
	{ IAR_CHF_PMA_GAIN,	0x03 },
	{ IAR_CCA1_THRESH,	0x50 },
	{ IAR_CORR_NVAL,	0x13 },
	{ IAR_ACKDELAY,		0x3D },
};
124
125 #define MCR20A_VALID_CHANNELS (0x07FFF800)
126 #define MCR20A_MAX_BUF (127)
127
128 #define printdev(X) (&X->spi->dev)
129
130 /* regmap information for Direct Access Register (DAR) access */
131 #define MCR20A_DAR_WRITE 0x01
132 #define MCR20A_DAR_READ 0x00
133 #define MCR20A_DAR_NUMREGS 0x3F
134
135 /* regmap information for Indirect Access Register (IAR) access */
136 #define MCR20A_IAR_ACCESS 0x80
137 #define MCR20A_IAR_NUMREGS 0xBEFF
138
139 /* Read/Write SPI Commands for DAR and IAR registers. */
140 #define MCR20A_READSHORT(reg) ((reg) << 1)
141 #define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1)
142 #define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5)
143 #define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
144
145 /* Type definitions for link configuration of instantiable layers */
146 #define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)
147
/* regmap .writeable_reg callback for the Direct Access Registers:
 * report which DAR registers may be written. Registers not listed
 * (e.g. DAR_ACM) are rejected by the regmap core.
 */
static bool
mcr20a_dar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
	case DAR_PHY_CTRL1:
	case DAR_PHY_CTRL2:
	case DAR_PHY_CTRL3:
	case DAR_PHY_CTRL4:
	case DAR_SRC_CTRL:
	case DAR_SRC_ADDRS_SUM_LSB:
	case DAR_SRC_ADDRS_SUM_MSB:
	case DAR_T3CMP_LSB:
	case DAR_T3CMP_MSB:
	case DAR_T3CMP_USB:
	case DAR_T2PRIMECMP_LSB:
	case DAR_T2PRIMECMP_MSB:
	case DAR_T1CMP_LSB:
	case DAR_T1CMP_MSB:
	case DAR_T1CMP_USB:
	case DAR_T2CMP_LSB:
	case DAR_T2CMP_MSB:
	case DAR_T2CMP_USB:
	case DAR_T4CMP_LSB:
	case DAR_T4CMP_MSB:
	case DAR_T4CMP_USB:
	case DAR_PLL_INT0:
	case DAR_PLL_FRAC0_LSB:
	case DAR_PLL_FRAC0_MSB:
	case DAR_PA_PWR:
	/* no DAR_ACM */
	case DAR_OVERWRITE_VER:
	case DAR_CLK_OUT_CTRL:
	case DAR_PWR_MODES:
		return true;
	default:
		return false;
	}
}
189
190 static bool
mcr20a_dar_readable(struct device * dev,unsigned int reg)191 mcr20a_dar_readable(struct device *dev, unsigned int reg)
192 {
193 bool rc;
194
195 /* all writeable are also readable */
196 rc = mcr20a_dar_writeable(dev, reg);
197 if (rc)
198 return rc;
199
200 /* readonly regs */
201 switch (reg) {
202 case DAR_RX_FRM_LEN:
203 case DAR_CCA1_ED_FNL:
204 case DAR_EVENT_TMR_LSB:
205 case DAR_EVENT_TMR_MSB:
206 case DAR_EVENT_TMR_USB:
207 case DAR_TIMESTAMP_LSB:
208 case DAR_TIMESTAMP_MSB:
209 case DAR_TIMESTAMP_USB:
210 case DAR_SEQ_STATE:
211 case DAR_LQI_VALUE:
212 case DAR_RSSI_CCA_CONT:
213 return true;
214 default:
215 return false;
216 }
217 }
218
219 static bool
mcr20a_dar_volatile(struct device * dev,unsigned int reg)220 mcr20a_dar_volatile(struct device *dev, unsigned int reg)
221 {
222 /* can be changed during runtime */
223 switch (reg) {
224 case DAR_IRQ_STS1:
225 case DAR_IRQ_STS2:
226 case DAR_IRQ_STS3:
227 /* use them in spi_async and regmap so it's volatile */
228 return true;
229 default:
230 return false;
231 }
232 }
233
234 static bool
mcr20a_dar_precious(struct device * dev,unsigned int reg)235 mcr20a_dar_precious(struct device *dev, unsigned int reg)
236 {
237 /* don't clear irq line on read */
238 switch (reg) {
239 case DAR_IRQ_STS1:
240 case DAR_IRQ_STS2:
241 case DAR_IRQ_STS3:
242 return true;
243 default:
244 return false;
245 }
246 }
247
/* regmap configuration for the 8-bit-address Direct Access Registers.
 * The read/write flag masks fold the SPI access-type bits into the
 * register address byte sent on the wire.
 */
static const struct regmap_config mcr20a_dar_regmap = {
	.name			= "mcr20a_dar",
	.reg_bits		= 8,
	.val_bits		= 8,
	.write_flag_mask	= REGISTER_ACCESS | REGISTER_WRITE,
	.read_flag_mask		= REGISTER_ACCESS | REGISTER_READ,
	.cache_type		= REGCACHE_RBTREE,
	.writeable_reg		= mcr20a_dar_writeable,
	.readable_reg		= mcr20a_dar_readable,
	.volatile_reg		= mcr20a_dar_volatile,
	.precious_reg		= mcr20a_dar_precious,
	.fast_io		= true,
	.can_multi_write	= true,
};
262
/* regmap .writeable_reg callback for the Indirect Access Registers:
 * report which IAR registers may be written.
 */
static bool
mcr20a_iar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case IAR_XTAL_TRIM:
	case IAR_PMC_LP_TRIM:
	case IAR_MACPANID0_LSB:
	case IAR_MACPANID0_MSB:
	case IAR_MACSHORTADDRS0_LSB:
	case IAR_MACSHORTADDRS0_MSB:
	case IAR_MACLONGADDRS0_0:
	case IAR_MACLONGADDRS0_8:
	case IAR_MACLONGADDRS0_16:
	case IAR_MACLONGADDRS0_24:
	case IAR_MACLONGADDRS0_32:
	case IAR_MACLONGADDRS0_40:
	case IAR_MACLONGADDRS0_48:
	case IAR_MACLONGADDRS0_56:
	case IAR_RX_FRAME_FILTER:
	case IAR_PLL_INT1:
	case IAR_PLL_FRAC1_LSB:
	case IAR_PLL_FRAC1_MSB:
	case IAR_MACPANID1_LSB:
	case IAR_MACPANID1_MSB:
	case IAR_MACSHORTADDRS1_LSB:
	case IAR_MACSHORTADDRS1_MSB:
	case IAR_MACLONGADDRS1_0:
	case IAR_MACLONGADDRS1_8:
	case IAR_MACLONGADDRS1_16:
	case IAR_MACLONGADDRS1_24:
	case IAR_MACLONGADDRS1_32:
	case IAR_MACLONGADDRS1_40:
	case IAR_MACLONGADDRS1_48:
	case IAR_MACLONGADDRS1_56:
	case IAR_DUAL_PAN_CTRL:
	case IAR_DUAL_PAN_DWELL:
	case IAR_CCA1_THRESH:
	case IAR_CCA1_ED_OFFSET_COMP:
	case IAR_LQI_OFFSET_COMP:
	case IAR_CCA_CTRL:
	case IAR_CCA2_CORR_PEAKS:
	case IAR_CCA2_CORR_THRESH:
	case IAR_TMR_PRESCALE:
	case IAR_ANT_PAD_CTRL:
	case IAR_MISC_PAD_CTRL:
	case IAR_BSM_CTRL:
	case IAR_RNG:
	case IAR_RX_WTR_MARK:
	case IAR_SOFT_RESET:
	case IAR_TXDELAY:
	case IAR_ACKDELAY:
	case IAR_CORR_NVAL:
	case IAR_ANT_AGC_CTRL:
	case IAR_AGC_THR1:
	case IAR_AGC_THR2:
	case IAR_PA_CAL:
	case IAR_ATT_RSSI1:
	case IAR_ATT_RSSI2:
	case IAR_RSSI_OFFSET:
	case IAR_XTAL_CTRL:
	case IAR_CHF_PMA_GAIN:
	case IAR_CHF_IBUF:
	case IAR_CHF_QBUF:
	case IAR_CHF_IRIN:
	case IAR_CHF_QRIN:
	case IAR_CHF_IL:
	case IAR_CHF_QL:
	case IAR_CHF_CC1:
	case IAR_CHF_CCL:
	case IAR_CHF_CC2:
	case IAR_CHF_IROUT:
	case IAR_CHF_QROUT:
	case IAR_PA_TUNING:
	case IAR_VCO_CTRL1:
	case IAR_VCO_CTRL2:
		return true;
	default:
		return false;
	}
}
343
344 static bool
mcr20a_iar_readable(struct device * dev,unsigned int reg)345 mcr20a_iar_readable(struct device *dev, unsigned int reg)
346 {
347 bool rc;
348
349 /* all writeable are also readable */
350 rc = mcr20a_iar_writeable(dev, reg);
351 if (rc)
352 return rc;
353
354 /* readonly regs */
355 switch (reg) {
356 case IAR_PART_ID:
357 case IAR_DUAL_PAN_STS:
358 case IAR_RX_BYTE_COUNT:
359 case IAR_FILTERFAIL_CODE1:
360 case IAR_FILTERFAIL_CODE2:
361 case IAR_RSSI:
362 return true;
363 default:
364 return false;
365 }
366 }
367
368 static bool
mcr20a_iar_volatile(struct device * dev,unsigned int reg)369 mcr20a_iar_volatile(struct device *dev, unsigned int reg)
370 {
371 /* can be changed during runtime */
372 switch (reg) {
373 case IAR_DUAL_PAN_STS:
374 case IAR_RX_BYTE_COUNT:
375 case IAR_FILTERFAIL_CODE1:
376 case IAR_FILTERFAIL_CODE2:
377 case IAR_RSSI:
378 return true;
379 default:
380 return false;
381 }
382 }
383
/* regmap configuration for the Indirect Access Registers. IAR access
 * goes through a 16-bit register field; the flag masks add the SPI
 * access-type bits plus the IAR index escape.
 */
static const struct regmap_config mcr20a_iar_regmap = {
	.name			= "mcr20a_iar",
	.reg_bits		= 16,
	.val_bits		= 8,
	.write_flag_mask	= REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
	.read_flag_mask		= REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
	.cache_type		= REGCACHE_RBTREE,
	.writeable_reg		= mcr20a_iar_writeable,
	.readable_reg		= mcr20a_iar_readable,
	.volatile_reg		= mcr20a_iar_volatile,
	.fast_io		= true,
};
396
/* Per-device driver state. The spi_message/spi_transfer/buffer groups
 * below are preallocated so that the IRQ, RX and TX paths can run fully
 * asynchronously via spi_async() without allocating memory at runtime.
 */
struct mcr20a_local {
	struct spi_device *spi;		/* underlying SPI device */

	struct ieee802154_hw *hw;	/* registered 802.15.4 hw */
	struct regmap *regmap_dar;	/* direct access registers */
	struct regmap *regmap_iar;	/* indirect access registers */

	u8 *buf;

	bool is_tx;			/* set while a TX is in flight */

	/* for writing tx buffer */
	struct spi_message tx_buf_msg;
	u8 tx_header[1];
	/* burst buffer write command */
	struct spi_transfer tx_xfer_header;
	u8 tx_len[1];
	/* len of tx packet */
	struct spi_transfer tx_xfer_len;
	/* data of tx packet */
	struct spi_transfer tx_xfer_buf;
	struct sk_buff *tx_skb;		/* skb currently being transmitted */

	/* shared message for single-register access; also used for
	 * reading the rxfifo frame length
	 */
	struct spi_message reg_msg;
	u8 reg_cmd[1];
	u8 reg_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer reg_xfer_cmd;
	struct spi_transfer reg_xfer_data;

	/* receive handling */
	struct spi_message rx_buf_msg;
	u8 rx_header[1];
	struct spi_transfer rx_xfer_header;
	u8 rx_lqi[1];
	struct spi_transfer rx_xfer_lqi;
	u8 rx_buf[MCR20A_MAX_BUF];
	struct spi_transfer rx_xfer_buf;

	/* isr handling for reading intstat */
	struct spi_message irq_msg;
	u8 irq_header[1];
	u8 irq_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer irq_xfer_data;
	struct spi_transfer irq_xfer_header;
};
443
/* SPI completion callback: the TX payload has been written to the
 * packet buffer, so trigger the TX sequence by writing XCVSEQ_TX to
 * PHY_CTRL1. The shared reg_msg is reused, hence all its fields are
 * re-initialized here.
 */
static void
mcr20a_write_tx_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->reg_msg.complete = NULL;	/* end of the async chain */
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_TX;
	lp->reg_xfer_data.len = 1;

	ret = spi_async(lp->spi, &lp->reg_msg);
	if (ret)
		dev_err(printdev(lp), "failed to set SEQ TX\n");
}
461
/* ieee802154_ops.xmit_async: remember the skb and force the transceiver
 * to IDLE. The resulting SEQ interrupt (handled in
 * mcr20a_irq_clean_complete with is_tx set) performs the actual buffer
 * write and starts the TX sequence.
 */
static int
mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->tx_skb = skb;

	print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     skb->data, skb->len, 0);

	lp->is_tx = 1;

	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}
483
/* ieee802154_ops.ed: energy detect.
 * NOTE(review): stub - always reports the constant 0xbe instead of a
 * real hardware measurement; confirm whether an ED sequence should be
 * run here.
 */
static int
mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
{
	WARN_ON(!level);
	*level = 0xbe;
	return 0;
}
491
/* ieee802154_ops.set_channel: program the PLL for the requested
 * 2.4 GHz channel.
 * NOTE(review): (channel - 11) is used as a table index without a range
 * check - presumably the mac802154 core validates the channel against
 * phy->supported.channels before calling; confirm.
 */
static int
mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* frequency = ((PLL_INT + 64) + (PLL_FRAC / 65536)) * 32 MHz */
	ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
	if (ret)
		return ret;
	ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
	if (ret)
		return ret;
	ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
			   PLL_FRAC[channel - 11]);
	if (ret)
		return ret;

	return 0;
}
514
/* ieee802154_ops.start: bring the transceiver up - disable slotted
 * operation, enable the interrupt line, unmask the sequencer interrupt
 * and start the RX sequence.
 * NOTE(review): if a regmap call fails after enable_irq(), the error
 * path leaves the irq enabled - confirm whether that is intentional.
 */
static int
mcr20a_start(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* No slotted operation */
	dev_dbg(printdev(lp), "no slotted operation\n");
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_SLOTTED, 0x0);
	if (ret < 0)
		return ret;

	/* enable irq */
	enable_irq(lp->spi->irq);

	/* Unmask SEQ interrupt */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
				 DAR_PHY_CTRL2_SEQMSK, 0x0);
	if (ret < 0)
		return ret;

	/* Start the RX sequence */
	dev_dbg(printdev(lp), "start the RX sequence\n");
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
	if (ret < 0)
		return ret;

	return 0;
}
548
/* ieee802154_ops.stop: abort any running sequence by forcing the
 * transceiver to IDLE, then disable the interrupt line.
 */
static void
mcr20a_stop(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* stop all running sequence */
	regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
			   DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	/* disable irq */
	disable_irq(lp->spi->irq);
}
563
/* ieee802154_ops.set_hw_addr_filt: program the hardware address filter
 * (short address, PAN id, extended address, PAN coordinator flag) for
 * PAN slot 0 according to the changed mask.
 * NOTE(review): the regmap_write/regmap_update_bits return values are
 * ignored and 0 is always returned - confirm whether errors should be
 * propagated.
 */
static int
mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
			struct ieee802154_hw_addr_filt *filt,
			unsigned long changed)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
		u16 addr = le16_to_cpu(filt->short_addr);

		/* little-endian split across the LSB/MSB register pair */
		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
	}

	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
		u16 pan = le16_to_cpu(filt->pan_id);

		regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
		regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
	}

	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
		u8 addr[8], i;

		/* one byte per IAR_MACLONGADDRS0_* register */
		memcpy(addr, &filt->ieee_addr, 8);
		for (i = 0; i < 8; i++)
			regmap_write(lp->regmap_iar,
				     IAR_MACLONGADDRS0_0 + i, addr[i]);
	}

	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
		if (filt->pan_coord) {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
		} else {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
		}
	}

	return 0;
}
608
/* Supported TX power levels in mBm, -30 dBm to 10 dBm in 2 dB steps;
 * the array index maps to the DAR_PA_PWR value in mcr20a_set_txpower().
 */
#define MCR20A_MAX_TX_POWERS	0x14
static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
	-3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
	-1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
};
615
616 static int
mcr20a_set_txpower(struct ieee802154_hw * hw,s32 mbm)617 mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
618 {
619 struct mcr20a_local *lp = hw->priv;
620 u32 i;
621
622 dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);
623
624 for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
625 if (lp->hw->phy->supported.tx_powers[i] == mbm)
626 return regmap_write(lp->regmap_dar, DAR_PA_PWR,
627 ((i + 8) & 0x1F));
628 }
629
630 return -EINVAL;
631 }
632
/* CCA ED threshold table, filled by mcr20a_hw_setup() with
 * -i * 100 mBm for each index; index == IAR_CCA1_THRESH value.
 */
#define MCR20A_MAX_ED_LEVELS	MCR20A_MIN_CCA_THRESHOLD
static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];
635
/* ieee802154_ops.set_cca_mode: map the nl802154 CCA mode/option onto
 * the MCR20A CCATYPE field, and for the combined mode select AND vs OR
 * combination via IAR_CCA_CTRL.
 */
static int
mcr20a_set_cca_mode(struct ieee802154_hw *hw,
		    const struct wpan_phy_cca *cca)
{
	struct mcr20a_local *lp = hw->priv;
	unsigned int cca_mode = 0xff;
	bool cca_mode_and = false;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* mapping 802.15.4 to driver spec */
	switch (cca->mode) {
	case NL802154_CCA_ENERGY:
		cca_mode = MCR20A_CCA_MODE1;
		break;
	case NL802154_CCA_CARRIER:
		cca_mode = MCR20A_CCA_MODE2;
		break;
	case NL802154_CCA_ENERGY_CARRIER:
		switch (cca->opt) {
		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = true;
			break;
		case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = false;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
				 DAR_PHY_CTRL4_CCATYPE_MASK,
				 cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
	if (ret < 0)
		return ret;

	/* mode 3 additionally needs the AND/OR combination bit */
	if (cca_mode == MCR20A_CCA_MODE3) {
		if (cca_mode_and) {
			ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x08);
		} else {
			ret = regmap_update_bits(lp->regmap_iar,
						 IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x00);
		}
		if (ret < 0)
			return ret;
	}

	return ret;
}
695
696 static int
mcr20a_set_cca_ed_level(struct ieee802154_hw * hw,s32 mbm)697 mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
698 {
699 struct mcr20a_local *lp = hw->priv;
700 u32 i;
701
702 dev_dbg(printdev(lp), "%s\n", __func__);
703
704 for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
705 if (hw->phy->supported.cca_ed_levels[i] == mbm)
706 return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
707 }
708
709 return 0;
710 }
711
/* ieee802154_ops.set_promiscuous_mode: enable or disable promiscuous
 * reception. On enable, set the PROMISCUOUS bit and open the RX frame
 * filter to ACK/no-type frames; on disable, restore the default filter
 * (beacon/data/command, frame versions 0 and 1).
 */
static int
mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;
	u8 rx_frame_filter_reg = 0x0;

	dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);

	if (on) {
		/* All frame types accepted */
		/* (the &= on a zeroed variable is a no-op, kept for
		 * documentation of the cleared FRM_VER field)
		 */
		rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
		rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
					IAR_RX_FRAME_FLT_NS_FT);

		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS,
					 DAR_PHY_CTRL4_PROMISCUOUS);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   rx_frame_filter_reg);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   IAR_RX_FRAME_FLT_FRM_VER |
				   IAR_RX_FRAME_FLT_BEACON_FT |
				   IAR_RX_FRAME_FLT_DATA_FT |
				   IAR_RX_FRAME_FLT_CMD_FT);
		if (ret < 0)
			return ret;
	}

	return 0;
}
754
/* ieee802154 driver operations registered with the mac802154 core */
static const struct ieee802154_ops mcr20a_hw_ops = {
	.owner			= THIS_MODULE,
	.xmit_async		= mcr20a_xmit,
	.ed			= mcr20a_ed,
	.set_channel		= mcr20a_set_channel,
	.start			= mcr20a_start,
	.stop			= mcr20a_stop,
	.set_hw_addr_filt	= mcr20a_set_hw_addr_filt,
	.set_txpower		= mcr20a_set_txpower,
	.set_cca_mode		= mcr20a_set_cca_mode,
	.set_cca_ed_level	= mcr20a_set_cca_ed_level,
	.set_promiscuous_mode	= mcr20a_set_promiscuous_mode,
};
768
/* Re-arm the receiver by asynchronously writing XCVSEQ_RX to PHY_CTRL1.
 * Always returns 0 (the write is fire-and-forget).
 */
static int
mcr20a_request_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Start the RX sequence */
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);

	return 0;
}
780
781 static void
mcr20a_handle_rx_read_buf_complete(void * context)782 mcr20a_handle_rx_read_buf_complete(void *context)
783 {
784 struct mcr20a_local *lp = context;
785 u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
786 struct sk_buff *skb;
787
788 dev_dbg(printdev(lp), "%s\n", __func__);
789
790 dev_dbg(printdev(lp), "RX is done\n");
791
792 if (!ieee802154_is_valid_psdu_len(len)) {
793 dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
794 len = IEEE802154_MTU;
795 }
796
797 len = len - 2; /* get rid of frame check field */
798
799 skb = dev_alloc_skb(len);
800 if (!skb)
801 return;
802
803 __skb_put_data(skb, lp->rx_buf, len);
804 ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
805
806 print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
807 lp->rx_buf, len, 0);
808 pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);
809
810 /* start RX sequence */
811 mcr20a_request_rx(lp);
812 }
813
814 static void
mcr20a_handle_rx_read_len_complete(void * context)815 mcr20a_handle_rx_read_len_complete(void *context)
816 {
817 struct mcr20a_local *lp = context;
818 u8 len;
819 int ret;
820
821 dev_dbg(printdev(lp), "%s\n", __func__);
822
823 /* get the length of received frame */
824 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
825 dev_dbg(printdev(lp), "frame len : %d\n", len);
826
827 /* prepare to read the rx buf */
828 lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
829 lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
830 lp->rx_xfer_buf.len = len;
831
832 ret = spi_async(lp->spi, &lp->rx_buf_msg);
833 if (ret)
834 dev_err(printdev(lp), "failed to read rx buffer length\n");
835 }
836
/* First stage of RX handling: asynchronously read DAR_RX_FRM_LEN using
 * the shared reg_msg; mcr20a_handle_rx_read_len_complete() continues
 * with the packet-buffer read.
 */
static int
mcr20a_handle_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);
	lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
	lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}
847
/* TX sequence finished: report completion to the 802.15.4 stack and
 * re-arm the receiver.
 */
static int
mcr20a_handle_tx_complete(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);

	return mcr20a_request_rx(lp);
}
857
/* Write the pending tx_skb into the chip's packet buffer via the
 * preassembled burst-write message (header + psduLength + payload);
 * mcr20a_write_tx_buf_complete() then starts the TX sequence.
 */
static int
mcr20a_handle_tx(struct mcr20a_local *lp)
{
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* write tx buffer */
	lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
	/* add 2 bytes of FCS */
	lp->tx_len[0] = lp->tx_skb->len + 2;
	lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
	/* add 1 byte psduLength */
	/* NOTE(review): this transfers skb->len + 1 bytes starting at
	 * skb->data, i.e. one byte beyond the payload - confirm against
	 * the packet-buffer format in the datasheet.
	 */
	lp->tx_xfer_buf.len = lp->tx_skb->len + 1;

	ret = spi_async(lp->spi, &lp->tx_buf_msg);
	if (ret) {
		dev_err(printdev(lp), "SPI write Failed for TX buf\n");
		return ret;
	}

	return 0;
}
881
/* SPI completion callback: the IRQ status registers have been cleared
 * and their previously-read values are in lp->irq_data. Re-enable the
 * interrupt line, then dispatch on the TX/RX/SEQ IRQ bit combination.
 * NOTE(review): irq_data is indexed with DAR_IRQ_STS1/DAR_IRQ_STS2 -
 * this assumes those register addresses are 0 and 1 (the DAR map starts
 * at IRQSTS1); confirm against mcr20a.h.
 * NOTE(review): the bits are masked with DAR_PHY_CTRL1_XCVSEQ_MASK even
 * though they are IRQSTS1 flags - presumably the mask value happens to
 * cover the interesting bits; confirm.
 */
static void
mcr20a_irq_clean_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;

	dev_dbg(printdev(lp), "%s\n", __func__);

	enable_irq(lp->spi->irq);

	dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
		lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);

	switch (seq_state) {
	/* TX IRQ, RX IRQ and SEQ IRQ */
	case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. No ACK\n");
			mcr20a_handle_tx_complete(lp);
		}
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
		/* rx is starting */
		dev_dbg(printdev(lp), "RX is starting\n");
		mcr20a_handle_rx(lp);
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			/* tx is done */
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. Get ACK\n");
			mcr20a_handle_tx_complete(lp);
		} else {
			/* rx is starting */
			dev_dbg(printdev(lp), "RX is starting\n");
			mcr20a_handle_rx(lp);
		}
		break;
	case (DAR_IRQSTS1_SEQIRQ):
		/* bare SEQ IRQ: the forced-IDLE step of a TX (see
		 * mcr20a_xmit) has completed - write the payload now
		 */
		if (lp->is_tx) {
			dev_dbg(printdev(lp), "TX is starting\n");
			mcr20a_handle_tx(lp);
		} else {
			dev_dbg(printdev(lp), "MCR20A is stop\n");
		}
		break;
	}
}
931
mcr20a_irq_status_complete(void * context)932 static void mcr20a_irq_status_complete(void *context)
933 {
934 int ret;
935 struct mcr20a_local *lp = context;
936
937 dev_dbg(printdev(lp), "%s\n", __func__);
938 regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
939 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);
940
941 lp->reg_msg.complete = mcr20a_irq_clean_complete;
942 lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
943 memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
944 lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;
945
946 ret = spi_async(lp->spi, &lp->reg_msg);
947
948 if (ret)
949 dev_err(printdev(lp), "failed to clean irq status\n");
950 }
951
mcr20a_irq_isr(int irq,void * data)952 static irqreturn_t mcr20a_irq_isr(int irq, void *data)
953 {
954 struct mcr20a_local *lp = data;
955 int ret;
956
957 disable_irq_nosync(irq);
958
959 lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
960 /* read IRQSTSx */
961 ret = spi_async(lp->spi, &lp->irq_msg);
962 if (ret) {
963 enable_irq(irq);
964 return IRQ_NONE;
965 }
966
967 return IRQ_HANDLED;
968 }
969
mcr20a_hw_setup(struct mcr20a_local * lp)970 static void mcr20a_hw_setup(struct mcr20a_local *lp)
971 {
972 u8 i;
973 struct ieee802154_hw *hw = lp->hw;
974 struct wpan_phy *phy = lp->hw->phy;
975
976 dev_dbg(printdev(lp), "%s\n", __func__);
977
978 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
979 IEEE802154_HW_AFILT |
980 IEEE802154_HW_PROMISCUOUS;
981
982 phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
983 WPAN_PHY_FLAG_CCA_MODE;
984
985 phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
986 BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
987 phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
988 BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
989
990 /* initiating cca_ed_levels */
991 for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
992 ++i) {
993 mcr20a_ed_levels[i] = -i * 100;
994 }
995
996 phy->supported.cca_ed_levels = mcr20a_ed_levels;
997 phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);
998
999 phy->cca.mode = NL802154_CCA_ENERGY;
1000
1001 phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
1002 phy->current_page = 0;
1003 /* MCR20A default reset value */
1004 phy->current_channel = 20;
1005 phy->supported.tx_powers = mcr20a_powers;
1006 phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
1007 phy->cca_ed_level = phy->supported.cca_ed_levels[75];
1008 phy->transmit_power = phy->supported.tx_powers[0x0F];
1009 }
1010
/* Preassemble the TX burst-write message: burst command byte,
 * psduLength byte, then the payload transfer (whose tx_buf/len are
 * filled in per packet by mcr20a_handle_tx()). Completion continues in
 * mcr20a_write_tx_buf_complete().
 */
static void
mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->tx_buf_msg);
	lp->tx_buf_msg.context = lp;
	lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;

	lp->tx_xfer_header.len = 1;
	lp->tx_xfer_header.tx_buf = lp->tx_header;

	lp->tx_xfer_len.len = 1;
	lp->tx_xfer_len.tx_buf = lp->tx_len;

	spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
}
1028
/* Preassemble the two RX-path messages:
 * - reg_msg: command byte + data, full-duplex (same buffers used for
 *   tx and rx), shared by all single-register async accesses;
 * - rx_buf_msg: burst-read command, frame payload (length filled in per
 *   frame), then the trailing LQI byte.
 */
static void
mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->reg_msg);
	lp->reg_msg.context = lp;

	lp->reg_xfer_cmd.len = 1;
	lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
	lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;

	lp->reg_xfer_data.rx_buf = lp->reg_data;
	lp->reg_xfer_data.tx_buf = lp->reg_data;

	spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
	spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);

	spi_message_init(&lp->rx_buf_msg);
	lp->rx_buf_msg.context = lp;
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_xfer_header.len = 1;
	lp->rx_xfer_header.tx_buf = lp->rx_header;
	lp->rx_xfer_header.rx_buf = lp->rx_header;

	lp->rx_xfer_buf.rx_buf = lp->rx_buf;

	lp->rx_xfer_lqi.len = 1;
	lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;

	spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
}
1061
/* Preassemble the ISR message that reads the IRQ status registers:
 * command byte followed by MCR20A_IRQSTS_NUM status bytes; completion
 * continues in mcr20a_irq_status_complete().
 */
static void
mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->irq_msg);
	lp->irq_msg.context = lp;
	lp->irq_msg.complete = mcr20a_irq_status_complete;
	lp->irq_xfer_header.len = 1;
	lp->irq_xfer_header.tx_buf = lp->irq_header;
	lp->irq_xfer_header.rx_buf = lp->irq_header;

	lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM;
	lp->irq_xfer_data.rx_buf = lp->irq_data;

	spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
	spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
}
1078
1079 static int
mcr20a_phy_init(struct mcr20a_local * lp)1080 mcr20a_phy_init(struct mcr20a_local *lp)
1081 {
1082 u8 index;
1083 unsigned int phy_reg = 0;
1084 int ret;
1085
1086 dev_dbg(printdev(lp), "%s\n", __func__);
1087
1088 /* Disable Tristate on COCO MISO for SPI reads */
1089 ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
1090 if (ret)
1091 goto err_ret;
1092
1093 /* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
1094 * immediately after init
1095 */
1096 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
1097 if (ret)
1098 goto err_ret;
1099
1100 /* Clear all PP IRQ bits in IRQSTS2 */
1101 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
1102 DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
1103 DAR_IRQSTS2_WAKE_IRQ);
1104 if (ret)
1105 goto err_ret;
1106
1107 /* Disable all timer interrupts */
1108 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
1109 if (ret)
1110 goto err_ret;
1111
1112 /* PHY_CTRL1 : default HW settings + AUTOACK enabled */
1113 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
1114 DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
1115
1116 /* PHY_CTRL2 : disable all interrupts */
1117 ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
1118 if (ret)
1119 goto err_ret;
1120
1121 /* PHY_CTRL3 : disable all timers and remaining interrupts */
1122 ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
1123 DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
1124 DAR_PHY_CTRL3_WAKE_MSK);
1125 if (ret)
1126 goto err_ret;
1127
1128 /* SRC_CTRL : enable Acknowledge Frame Pending and
1129 * Source Address Matching Enable
1130 */
1131 ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
1132 DAR_SRC_CTRL_ACK_FRM_PND |
1133 (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
1134 if (ret)
1135 goto err_ret;
1136
1137 /* RX_FRAME_FILTER */
1138 /* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
1139 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
1140 IAR_RX_FRAME_FLT_FRM_VER |
1141 IAR_RX_FRAME_FLT_BEACON_FT |
1142 IAR_RX_FRAME_FLT_DATA_FT |
1143 IAR_RX_FRAME_FLT_CMD_FT);
1144 if (ret)
1145 goto err_ret;
1146
1147 dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
1148 MCR20A_OVERWRITE_VERSION);
1149
1150 /* Overwrites direct registers */
1151 ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
1152 MCR20A_OVERWRITE_VERSION);
1153 if (ret)
1154 goto err_ret;
1155
1156 /* Overwrites indirect registers */
1157 ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
1158 ARRAY_SIZE(mar20a_iar_overwrites));
1159 if (ret)
1160 goto err_ret;
1161
1162 /* Clear HW indirect queue */
1163 dev_dbg(printdev(lp), "clear HW indirect queue\n");
1164 for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
1165 phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
1166 DAR_SRC_CTRL_INDEX_SHIFT)
1167 | (DAR_SRC_CTRL_SRCADDR_EN)
1168 | (DAR_SRC_CTRL_INDEX_DISABLE));
1169 ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
1170 if (ret)
1171 goto err_ret;
1172 phy_reg = 0;
1173 }
1174
1175 /* Assign HW Indirect hash table to PAN0 */
1176 ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
1177 if (ret)
1178 goto err_ret;
1179
1180 /* Clear current lvl */
1181 phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;
1182
1183 /* Set new lvl */
1184 phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
1185 IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
1186 ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
1187 if (ret)
1188 goto err_ret;
1189
1190 /* Set CCA threshold to -75 dBm */
1191 ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
1192 if (ret)
1193 goto err_ret;
1194
1195 /* Set prescaller to obtain 1 symbol (16us) timebase */
1196 ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
1197 if (ret)
1198 goto err_ret;
1199
1200 /* Enable autodoze mode. */
1201 ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
1202 DAR_PWR_MODES_AUTODOZE,
1203 DAR_PWR_MODES_AUTODOZE);
1204 if (ret)
1205 goto err_ret;
1206
1207 /* Disable clk_out */
1208 ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
1209 DAR_CLK_OUT_CTRL_EN, 0x0);
1210 if (ret)
1211 goto err_ret;
1212
1213 return 0;
1214
1215 err_ret:
1216 return ret;
1217 }
1218
1219 static int
mcr20a_probe(struct spi_device * spi)1220 mcr20a_probe(struct spi_device *spi)
1221 {
1222 struct ieee802154_hw *hw;
1223 struct mcr20a_local *lp;
1224 struct gpio_desc *rst_b;
1225 int irq_type;
1226 int ret = -ENOMEM;
1227
1228 dev_dbg(&spi->dev, "%s\n", __func__);
1229
1230 if (!spi->irq) {
1231 dev_err(&spi->dev, "no IRQ specified\n");
1232 return -EINVAL;
1233 }
1234
1235 rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
1236 if (IS_ERR(rst_b))
1237 return dev_err_probe(&spi->dev, PTR_ERR(rst_b),
1238 "Failed to get 'rst_b' gpio");
1239
1240 /* reset mcr20a */
1241 usleep_range(10, 20);
1242 gpiod_set_value_cansleep(rst_b, 1);
1243 usleep_range(10, 20);
1244 gpiod_set_value_cansleep(rst_b, 0);
1245 usleep_range(120, 240);
1246
1247 /* allocate ieee802154_hw and private data */
1248 hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
1249 if (!hw) {
1250 dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
1251 return ret;
1252 }
1253
1254 /* init mcr20a local data */
1255 lp = hw->priv;
1256 lp->hw = hw;
1257 lp->spi = spi;
1258
1259 /* init ieee802154_hw */
1260 hw->parent = &spi->dev;
1261 ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
1262
1263 /* init buf */
1264 lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
1265
1266 if (!lp->buf) {
1267 ret = -ENOMEM;
1268 goto free_dev;
1269 }
1270
1271 mcr20a_setup_tx_spi_messages(lp);
1272 mcr20a_setup_rx_spi_messages(lp);
1273 mcr20a_setup_irq_spi_messages(lp);
1274
1275 /* setup regmap */
1276 lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
1277 if (IS_ERR(lp->regmap_dar)) {
1278 ret = PTR_ERR(lp->regmap_dar);
1279 dev_err(&spi->dev, "Failed to allocate dar map: %d\n",
1280 ret);
1281 goto free_dev;
1282 }
1283
1284 lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
1285 if (IS_ERR(lp->regmap_iar)) {
1286 ret = PTR_ERR(lp->regmap_iar);
1287 dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
1288 goto free_dev;
1289 }
1290
1291 mcr20a_hw_setup(lp);
1292
1293 spi_set_drvdata(spi, lp);
1294
1295 ret = mcr20a_phy_init(lp);
1296 if (ret < 0) {
1297 dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
1298 goto free_dev;
1299 }
1300
1301 irq_type = irq_get_trigger_type(spi->irq);
1302 if (!irq_type)
1303 irq_type = IRQF_TRIGGER_FALLING;
1304
1305 ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
1306 irq_type | IRQF_NO_AUTOEN, dev_name(&spi->dev), lp);
1307 if (ret) {
1308 dev_err(&spi->dev, "could not request_irq for mcr20a\n");
1309 ret = -ENODEV;
1310 goto free_dev;
1311 }
1312
1313 ret = ieee802154_register_hw(hw);
1314 if (ret) {
1315 dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
1316 goto free_dev;
1317 }
1318
1319 return ret;
1320
1321 free_dev:
1322 ieee802154_free_hw(lp->hw);
1323
1324 return ret;
1325 }
1326
mcr20a_remove(struct spi_device * spi)1327 static void mcr20a_remove(struct spi_device *spi)
1328 {
1329 struct mcr20a_local *lp = spi_get_drvdata(spi);
1330
1331 dev_dbg(&spi->dev, "%s\n", __func__);
1332
1333 ieee802154_unregister_hw(lp->hw);
1334 ieee802154_free_hw(lp->hw);
1335 }
1336
/* Devicetree match table ("nxp,mcr20a"). */
static const struct of_device_id mcr20a_of_match[] = {
	{ .compatible = "nxp,mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(of, mcr20a_of_match);
1342
/* Legacy SPI board-info match table. */
static const struct spi_device_id mcr20a_device_id[] = {
	{ .name = "mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(spi, mcr20a_device_id);
1348
/* SPI driver glue: binds probe/remove to both OF and SPI-id matches. */
static struct spi_driver mcr20a_driver = {
	.id_table = mcr20a_device_id,
	.driver = {
		.of_match_table = mcr20a_of_match,
		.name	= "mcr20a",
	},
	.probe      = mcr20a_probe,
	.remove     = mcr20a_remove,
};

module_spi_driver(mcr20a_driver);
1360
1361 MODULE_DESCRIPTION("MCR20A Transceiver Driver");
1362 MODULE_LICENSE("GPL v2");
1363 MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail>");
1364