Lines Matching +full:mram +full:- +full:cfg
1 // SPDX-License-Identifier: GPL-2.0
5 // Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
8 * https://github.com/linux-can/can-doc/tree/master/m_can
325 return cdev->ops->read_reg(cdev, reg); in m_can_read()
331 cdev->ops->write_reg(cdev, reg, val); in m_can_write()
338 u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + in m_can_fifo_read()
344 return cdev->ops->read_fifo(cdev, addr_offset, val, val_count); in m_can_fifo_read()
351 u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + in m_can_fifo_write()
357 return cdev->ops->write_fifo(cdev, addr_offset, val, val_count); in m_can_fifo_write()
363 return cdev->ops->write_fifo(cdev, fpi, &val, 1); in m_can_fifo_write_no_off()
369 u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + in m_can_txe_fifo_read()
372 return cdev->ops->read_fifo(cdev, addr_offset, val, 1); in m_can_txe_fifo_read()
411 netdev_warn(cdev->net, "Failed to init module\n"); in m_can_config_endisable()
414 timeout--; in m_can_config_endisable()
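The "Failed to init module" warning and the timeout countdown above belong to the INIT/CCE handshake that gates every configuration change. A minimal sketch of the enable side, with the CCCR_INIT and CCCR_CCE bit names assumed from the M_CAN register map:

        u32 cccr = m_can_read(cdev, M_CAN_CCCR);
        u32 timeout = 10;

        /* request init mode, then unlock the configuration registers */
        m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
        udelay(5);
        m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);

        /* the core takes a moment to acknowledge INIT/CCE */
        while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) !=
               (CCCR_INIT | CCCR_CCE)) {
                if (timeout == 0) {
                        netdev_warn(cdev->net, "Failed to init module\n");
                        return;
                }
                timeout--;
                udelay(1);
        }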
421 if (!cdev->net->irq) { in m_can_enable_all_interrupts()
422 dev_dbg(cdev->dev, "Start hrtimer\n"); in m_can_enable_all_interrupts()
423 hrtimer_start(&cdev->hrtimer, in m_can_enable_all_interrupts()
436 if (!cdev->net->irq) { in m_can_disable_all_interrupts()
437 dev_dbg(cdev->dev, "Stop hrtimer\n"); in m_can_disable_all_interrupts()
438 hrtimer_cancel(&cdev->hrtimer); in m_can_disable_all_interrupts()
442 /* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
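The comment above refers to widening the 16-bit timestamp counter in TSCV.TSC so it can be compared with the 32-bit timestamps that rx-offload sorts on. A sketch of that read-and-shift, assuming the TSCV_TSC_MASK field definition:

        u32 tscv = m_can_read(cdev, M_CAN_TSCV);
        u32 tsc = FIELD_GET(TSCV_TSC_MASK, tscv);

        return tsc << 16;       /* 16-bit counter placed in the upper half */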
460 if (cdev->tx_skb) { in m_can_clean()
463 net->stats.tx_errors++; in m_can_clean()
464 if (cdev->version > 30) in m_can_clean()
468 can_free_echo_skb(cdev->net, putidx, NULL); in m_can_clean()
469 cdev->tx_skb = NULL; in m_can_clean()
473 /* For peripherals, pass skb to rx-offload, which will push skb from
474 * napi. For non-peripherals, RX is done in napi already, so push
476 * rx-offload and is ignored for non-peripherals.
482 if (cdev->is_peripheral) { in m_can_receive_skb()
483 struct net_device_stats *stats = &cdev->net->stats; in m_can_receive_skb()
486 err = can_rx_offload_queue_timestamp(&cdev->offload, skb, in m_can_receive_skb()
489 stats->rx_fifo_errors++; in m_can_receive_skb()
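Only the peripheral branch matches the search; for non-peripheral devices, which already run this from the NAPI poll loop, the frame is presumably pushed straight to the stack:

        } else {
                netif_receive_skb(skb);         /* already in napi context */
        }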
497 struct net_device_stats *stats = &dev->stats; in m_can_read_fifo()
514 stats->rx_dropped++; in m_can_read_fifo()
519 cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F); in m_can_read_fifo()
521 cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F); in m_can_read_fifo()
524 cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG; in m_can_read_fifo()
526 cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK; in m_can_read_fifo()
529 cf->flags |= CANFD_ESI; in m_can_read_fifo()
534 cf->can_id |= CAN_RTR_FLAG; in m_can_read_fifo()
537 cf->flags |= CANFD_BRS; in m_can_read_fifo()
540 cf->data, DIV_ROUND_UP(cf->len, 4)); in m_can_read_fifo()
544 stats->rx_bytes += cf->len; in m_can_read_fifo()
546 stats->rx_packets++; in m_can_read_fifo()
568 int ack_fgi = -1; in m_can_do_rx_poll()
586 quota--; in m_can_do_rx_poll()
589 fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi); in m_can_do_rx_poll()
592 if (ack_fgi != -1) in m_can_do_rx_poll()
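The acknowledge guarded by that check happens once per poll, using the last get index that was actually read. The write presumably looks like the following, with the RXF0A register and F0AI field names assumed (the TX event FIFO ack at line 1094 follows the same pattern with TXEFA):

                m_can_write(cdev, M_CAN_RXF0A,
                            FIELD_PREP(RXF0A_F0AI_MASK, ack_fgi));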
604 struct net_device_stats *stats = &dev->stats; in m_can_handle_lost_msg()
611 stats->rx_errors++; in m_can_handle_lost_msg()
612 stats->rx_over_errors++; in m_can_handle_lost_msg()
618 frame->can_id |= CAN_ERR_CRTL; in m_can_handle_lost_msg()
619 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; in m_can_handle_lost_msg()
621 if (cdev->is_peripheral) in m_can_handle_lost_msg()
633 struct net_device_stats *stats = &dev->stats; in m_can_handle_lec_err()
638 cdev->can.can_stats.bus_error++; in m_can_handle_lec_err()
647 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; in m_can_handle_lec_err()
652 stats->rx_errors++; in m_can_handle_lec_err()
654 cf->data[2] |= CAN_ERR_PROT_STUFF; in m_can_handle_lec_err()
658 stats->rx_errors++; in m_can_handle_lec_err()
660 cf->data[2] |= CAN_ERR_PROT_FORM; in m_can_handle_lec_err()
664 stats->tx_errors++; in m_can_handle_lec_err()
666 cf->data[3] = CAN_ERR_PROT_LOC_ACK; in m_can_handle_lec_err()
670 stats->tx_errors++; in m_can_handle_lec_err()
672 cf->data[2] |= CAN_ERR_PROT_BIT1; in m_can_handle_lec_err()
676 stats->tx_errors++; in m_can_handle_lec_err()
678 cf->data[2] |= CAN_ERR_PROT_BIT0; in m_can_handle_lec_err()
682 stats->rx_errors++; in m_can_handle_lec_err()
684 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; in m_can_handle_lec_err()
693 if (cdev->is_peripheral) in m_can_handle_lec_err()
708 bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr); in __m_can_get_berr_counter()
709 bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr); in __m_can_get_berr_counter()
716 if (cdev->pm_clock_support == 0) in m_can_clk_start()
719 return pm_runtime_resume_and_get(cdev->dev); in m_can_clk_start()
724 if (cdev->pm_clock_support) in m_can_clk_stop()
725 pm_runtime_put_sync(cdev->dev); in m_can_clk_stop()
758 cdev->can.can_stats.error_warning++; in m_can_handle_state_change()
759 cdev->can.state = CAN_STATE_ERROR_WARNING; in m_can_handle_state_change()
763 cdev->can.can_stats.error_passive++; in m_can_handle_state_change()
764 cdev->can.state = CAN_STATE_ERROR_PASSIVE; in m_can_handle_state_change()
767 /* bus-off state */ in m_can_handle_state_change()
768 cdev->can.state = CAN_STATE_BUS_OFF; in m_can_handle_state_change()
770 cdev->can.can_stats.bus_off++; in m_can_handle_state_change()
787 cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; in m_can_handle_state_change()
788 cf->data[1] = (bec.txerr > bec.rxerr) ? in m_can_handle_state_change()
791 cf->data[6] = bec.txerr; in m_can_handle_state_change()
792 cf->data[7] = bec.rxerr; in m_can_handle_state_change()
796 cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; in m_can_handle_state_change()
799 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; in m_can_handle_state_change()
801 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; in m_can_handle_state_change()
802 cf->data[6] = bec.txerr; in m_can_handle_state_change()
803 cf->data[7] = bec.rxerr; in m_can_handle_state_change()
806 /* bus-off state */ in m_can_handle_state_change()
807 cf->can_id |= CAN_ERR_BUSOFF; in m_can_handle_state_change()
813 if (cdev->is_peripheral) in m_can_handle_state_change()
826 if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) { in m_can_handle_state_errors()
832 if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) { in m_can_handle_state_errors()
838 if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) { in m_can_handle_state_errors()
873 struct net_device_stats *stats = &dev->stats; in m_can_handle_protocol_error()
883 stats->tx_errors++; in m_can_handle_protocol_error()
886 if (cdev->version >= 31 && (irqstatus & IR_PEA)) { in m_can_handle_protocol_error()
888 cdev->can.can_stats.arbitration_lost++; in m_can_handle_protocol_error()
890 cf->can_id |= CAN_ERR_LOSTARB; in m_can_handle_protocol_error()
891 cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC; in m_can_handle_protocol_error()
900 if (cdev->is_peripheral) in m_can_handle_protocol_error()
918 if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { in m_can_handle_bus_errors()
934 if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && in m_can_handle_bus_errors()
963 if (cdev->version <= 31 && irqstatus & IR_MRAF && in m_can_rx_handler()
983 rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done)); in m_can_rx_handler()
1000 /* Don't re-enable interrupts if the driver had a fatal error in m_can_rx_peripheral()
1011 struct net_device *dev = napi->dev; in m_can_poll()
1016 irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR); in m_can_poll()
1020 /* Don't re-enable interrupts if the driver had a fatal error in m_can_poll()
1031 /* Echo tx skb and update net stats. Peripherals use rx-offload for
1033 * by rx-offload, and is ignored for non-peripherals.
1039 struct net_device *dev = cdev->net; in m_can_tx_update_stats()
1040 struct net_device_stats *stats = &dev->stats; in m_can_tx_update_stats()
1042 if (cdev->is_peripheral) in m_can_tx_update_stats()
1043 stats->tx_bytes += in m_can_tx_update_stats()
1044 can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload, in m_can_tx_update_stats()
1049 stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); in m_can_tx_update_stats()
1051 stats->tx_packets++; in m_can_tx_update_stats()
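The msg_mark used here is the message marker the TX path stored in the buffer element; the hardware copies it into the matching TX event FIFO element, so it doubles as the echo index. A sketch of how m_can_echo_tx_event likely recovers it, with the TX_EVENT_MM_MASK and TX_EVENT_TXTS_MASK field names assumed:

        msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
        timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;

        /* update stats and echo the skb back to the stack */
        m_can_tx_update_stats(cdev, msg_mark, timestamp);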
1059 int ack_fgi = -1; in m_can_echo_tx_event()
1088 fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi); in m_can_echo_tx_event()
1094 if (ack_fgi != -1) in m_can_echo_tx_event()
1107 if (pm_runtime_suspended(cdev->dev)) in m_can_isr()
1116 if (cdev->ops->clear_interrupts) in m_can_isr()
1117 cdev->ops->clear_interrupts(cdev); in m_can_isr()
1120 * - rx IRQ in m_can_isr()
1121 * - state change IRQ in m_can_isr()
1122 * - bus error IRQ and bus error reporting in m_can_isr()
1125 cdev->irqstatus = ir; in m_can_isr()
1126 if (!cdev->is_peripheral) { in m_can_isr()
1128 napi_schedule(&cdev->napi); in m_can_isr()
1134 if (cdev->version == 30) { in m_can_isr()
1139 if (cdev->is_peripheral) in m_can_isr()
1156 if (cdev->is_peripheral) in m_can_isr()
1157 can_rx_offload_threaded_irq_finish(&cdev->offload); in m_can_isr()
1217 const struct can_bittiming *bt = &cdev->can.bittiming; in m_can_set_bittiming()
1218 const struct can_bittiming *dbt = &cdev->can.data_bittiming; in m_can_set_bittiming()
1222 brp = bt->brp - 1; in m_can_set_bittiming()
1223 sjw = bt->sjw - 1; in m_can_set_bittiming()
1224 tseg1 = bt->prop_seg + bt->phase_seg1 - 1; in m_can_set_bittiming()
1225 tseg2 = bt->phase_seg2 - 1; in m_can_set_bittiming()
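The four values computed above map directly onto the nominal bit timing register; the subsequent write presumably packs them along these lines (NBTP field names assumed from the M_CAN register layout):

        reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
                  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
                  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
                  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
        m_can_write(cdev, M_CAN_NBTP, reg_btp);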
1232 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { in m_can_set_bittiming()
1234 brp = dbt->brp - 1; in m_can_set_bittiming()
1235 sjw = dbt->sjw - 1; in m_can_set_bittiming()
1236 tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; in m_can_set_bittiming()
1237 tseg2 = dbt->phase_seg2 - 1; in m_can_set_bittiming()
1243 if (dbt->bitrate > 2500000) { in m_can_set_bittiming()
1249 ssp = dbt->sample_point; in m_can_set_bittiming()
1254 tdco = (cdev->can.clock.freq / 1000) * in m_can_set_bittiming()
1255 ssp / dbt->bitrate; in m_can_set_bittiming()
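A worked example makes the TDCO formula above concrete. sample_point is expressed in tenths of a percent, so assuming a 40 MHz CAN clock, a 4 Mbit/s data bitrate and a 75.0 % secondary sample point (ssp = 750): tdco = (40000000 / 1000) * 750 / 4000000 = 7, i.e. the transmitter delay compensation offset lands 7 of the 10 CAN-clock periods into each data bit. TDCO is a 7-bit field, so values above 127 cannot be programmed and have to be clamped.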
1281 * - set rx buffer/fifo element size
1282 * - configure rx fifo
1283 * - accept non-matching frame into fifo 0
1284 * - configure tx buffer
1285 * - >= v3.1.x: TX FIFO is used
1286 * - configure mode
1287 * - setup bittiming
1288 * - configure timestamp generation
1299 dev_err(cdev->dev, "Message RAM configuration failed\n"); in m_can_chip_config()
1316 /* Accept Non-matching Frames Into FIFO 0 */ in m_can_chip_config()
1319 if (cdev->version == 30) { in m_can_chip_config()
1322 cdev->mcfg[MRAM_TXB].off); in m_can_chip_config()
1327 cdev->mcfg[MRAM_TXB].num) | in m_can_chip_config()
1328 cdev->mcfg[MRAM_TXB].off); in m_can_chip_config()
1336 if (cdev->version == 30) { in m_can_chip_config()
1339 cdev->mcfg[MRAM_TXE].off); in m_can_chip_config()
1344 cdev->mcfg[MRAM_TXE].num) | in m_can_chip_config()
1345 cdev->mcfg[MRAM_TXE].off); in m_can_chip_config()
1350 FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) | in m_can_chip_config()
1351 cdev->mcfg[MRAM_RXF0].off); in m_can_chip_config()
1354 FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) | in m_can_chip_config()
1355 cdev->mcfg[MRAM_RXF1].off); in m_can_chip_config()
1360 if (cdev->version == 30) { in m_can_chip_config()
1367 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) in m_can_chip_config()
1376 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) in m_can_chip_config()
1379 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) in m_can_chip_config()
1384 if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { in m_can_chip_config()
1390 if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) in m_can_chip_config()
1394 if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) in m_can_chip_config()
1402 if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { in m_can_chip_config()
1403 if (cdev->version == 30) in m_can_chip_config()
1425 if (cdev->ops->init) in m_can_chip_config()
1426 cdev->ops->init(cdev); in m_can_chip_config()
1441 cdev->can.state = CAN_STATE_ERROR_ACTIVE; in m_can_start()
1457 return -EOPNOTSUPP; in m_can_set_mode()
1499 int niso_timeout = -ETIMEDOUT; in m_can_niso_supported()
1523 /* return false if time out (-ETIMEDOUT), else return true */ in m_can_niso_supported()
1529 struct net_device *dev = cdev->net; in m_can_dev_setup()
1535 dev_err(cdev->dev, "Unsupported version number: %2d", in m_can_dev_setup()
1537 return -EINVAL; in m_can_dev_setup()
1540 if (!cdev->is_peripheral) in m_can_dev_setup()
1541 netif_napi_add(dev, &cdev->napi, m_can_poll); in m_can_dev_setup()
1544 cdev->version = m_can_version; in m_can_dev_setup()
1545 cdev->can.do_set_mode = m_can_set_mode; in m_can_dev_setup()
1546 cdev->can.do_get_berr_counter = m_can_get_berr_counter; in m_can_dev_setup()
1549 cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | in m_can_dev_setup()
1556 switch (cdev->version) { in m_can_dev_setup()
1562 cdev->can.bittiming_const = &m_can_bittiming_const_30X; in m_can_dev_setup()
1563 cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; in m_can_dev_setup()
1570 cdev->can.bittiming_const = &m_can_bittiming_const_31X; in m_can_dev_setup()
1571 cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; in m_can_dev_setup()
1576 cdev->can.bittiming_const = &m_can_bittiming_const_31X; in m_can_dev_setup()
1577 cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; in m_can_dev_setup()
1579 cdev->can.ctrlmode_supported |= in m_can_dev_setup()
1584 dev_err(cdev->dev, "Unsupported version number: %2d", in m_can_dev_setup()
1585 cdev->version); in m_can_dev_setup()
1586 return -EINVAL; in m_can_dev_setup()
1589 if (cdev->ops->init) in m_can_dev_setup()
1590 cdev->ops->init(cdev); in m_can_dev_setup()
1606 cdev->can.state = CAN_STATE_STOPPED; in m_can_stop()
1616 if (dev->irq) in m_can_close()
1617 free_irq(dev->irq, dev); in m_can_close()
1619 if (cdev->is_peripheral) { in m_can_close()
1620 cdev->tx_skb = NULL; in m_can_close()
1621 destroy_workqueue(cdev->tx_wq); in m_can_close()
1622 cdev->tx_wq = NULL; in m_can_close()
1623 can_rx_offload_disable(&cdev->offload); in m_can_close()
1625 napi_disable(&cdev->napi); in m_can_close()
1631 phy_power_off(cdev->transceiver); in m_can_close()
1640 unsigned int wrap = cdev->can.echo_skb_max; in m_can_next_echo_skb_occupied()
1647 return !!cdev->can.echo_skb[next_idx]; in m_can_next_echo_skb_occupied()
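The index being tested is the slot after putidx, wrapping at echo_skb_max, using the same wraparound idiom as the FIFO get indices above; roughly:

        next_idx = (++putidx >= wrap ? 0 : putidx);

and the slot counts as occupied while an echo skb is still parked at that index.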
1652 struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; in m_can_tx_handler()
1653 struct net_device *dev = cdev->net; in m_can_tx_handler()
1654 struct sk_buff *skb = cdev->tx_skb; in m_can_tx_handler()
1661 cdev->tx_skb = NULL; in m_can_tx_handler()
1665 if (cf->can_id & CAN_EFF_FLAG) { in m_can_tx_handler()
1666 fifo_header.id = cf->can_id & CAN_EFF_MASK; in m_can_tx_handler()
1669 fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18); in m_can_tx_handler()
1672 if (cf->can_id & CAN_RTR_FLAG) in m_can_tx_handler()
1675 if (cdev->version == 30) { in m_can_tx_handler()
1678 fifo_header.dlc = can_fd_len2dlc(cf->len) << 16; in m_can_tx_handler()
1686 cf->data, DIV_ROUND_UP(cf->len, 4)); in m_can_tx_handler()
1690 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { in m_can_tx_handler()
1694 if (cf->flags & CANFD_BRS) in m_can_tx_handler()
1723 if (cdev->is_peripheral) { in m_can_tx_handler()
1725 dev->stats.tx_dropped++; in m_can_tx_handler()
1735 /* Construct DLC Field, with CAN-FD configuration. in m_can_tx_handler()
1744 if (cf->flags & CANFD_BRS) in m_can_tx_handler()
1749 FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) | in m_can_tx_handler()
1756 cf->data, DIV_ROUND_UP(cf->len, 4)); in m_can_tx_handler()
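For reference on the DLC encoding used here: can_fd_len2dlc() maps a payload length to the 4-bit code the buffer element expects. Classic CAN lengths 0..8 encode as themselves, while CAN FD uses codes 9..15 for 12, 16, 20, 24, 32, 48 and 64 bytes. A 64-byte CAN FD frame is therefore written with FIELD_PREP(TX_BUF_DLC_MASK, 15), and the data copy above moves DIV_ROUND_UP(64, 4) = 16 32-bit words into the TX buffer element.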
1798 if (cdev->is_peripheral) { in m_can_start_xmit()
1799 if (cdev->tx_skb) { in m_can_start_xmit()
1804 if (cdev->can.state == CAN_STATE_BUS_OFF) { in m_can_start_xmit()
1812 cdev->tx_skb = skb; in m_can_start_xmit()
1813 netif_stop_queue(cdev->net); in m_can_start_xmit()
1814 queue_work(cdev->tx_wq, &cdev->tx_work); in m_can_start_xmit()
1817 cdev->tx_skb = skb; in m_can_start_xmit()
1829 m_can_isr(0, cdev->net); in hrtimer_callback()
1841 err = phy_power_on(cdev->transceiver); in m_can_open()
1856 if (cdev->is_peripheral) in m_can_open()
1857 can_rx_offload_enable(&cdev->offload); in m_can_open()
1859 napi_enable(&cdev->napi); in m_can_open()
1862 if (cdev->is_peripheral) { in m_can_open()
1863 cdev->tx_skb = NULL; in m_can_open()
1864 cdev->tx_wq = alloc_workqueue("mcan_wq", in m_can_open()
1866 if (!cdev->tx_wq) { in m_can_open()
1867 err = -ENOMEM; in m_can_open()
1871 INIT_WORK(&cdev->tx_work, m_can_tx_work_queue); in m_can_open()
1873 err = request_threaded_irq(dev->irq, NULL, m_can_isr, in m_can_open()
1875 dev->name, dev); in m_can_open()
1876 } else if (dev->irq) { in m_can_open()
1877 err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, in m_can_open()
1896 if (cdev->is_peripheral || dev->irq) in m_can_open()
1897 free_irq(dev->irq, dev); in m_can_open()
1899 if (cdev->is_peripheral) in m_can_open()
1900 destroy_workqueue(cdev->tx_wq); in m_can_open()
1902 if (cdev->is_peripheral) in m_can_open()
1903 can_rx_offload_disable(&cdev->offload); in m_can_open()
1905 napi_disable(&cdev->napi); in m_can_open()
1910 phy_power_off(cdev->transceiver); in m_can_open()
1927 dev->flags |= IFF_ECHO; /* we support local echo */ in register_m_can_dev()
1928 dev->netdev_ops = &m_can_netdev_ops; in register_m_can_dev()
1929 dev->ethtool_ops = &m_can_ethtool_ops; in register_m_can_dev()
1938 total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off + in m_can_check_mram_cfg()
1939 cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; in m_can_check_mram_cfg()
1941 dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n", in m_can_check_mram_cfg()
1943 return -EINVAL; in m_can_check_mram_cfg()
1953 cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0]; in m_can_of_parse_mram()
1954 cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1]; in m_can_of_parse_mram()
1955 cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off + in m_can_of_parse_mram()
1956 cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; in m_can_of_parse_mram()
1957 cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2]; in m_can_of_parse_mram()
1958 cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off + in m_can_of_parse_mram()
1959 cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; in m_can_of_parse_mram()
1960 cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] & in m_can_of_parse_mram()
1962 cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off + in m_can_of_parse_mram()
1963 cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; in m_can_of_parse_mram()
1964 cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] & in m_can_of_parse_mram()
1966 cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off + in m_can_of_parse_mram()
1967 cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; in m_can_of_parse_mram()
1968 cdev->mcfg[MRAM_RXB].num = mram_config_vals[5]; in m_can_of_parse_mram()
1969 cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off + in m_can_of_parse_mram()
1970 cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; in m_can_of_parse_mram()
1971 cdev->mcfg[MRAM_TXE].num = mram_config_vals[6]; in m_can_of_parse_mram()
1972 cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off + in m_can_of_parse_mram()
1973 cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; in m_can_of_parse_mram()
1974 cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & in m_can_of_parse_mram()
1977 dev_dbg(cdev->dev, in m_can_of_parse_mram()
1979 cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, in m_can_of_parse_mram()
1980 cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, in m_can_of_parse_mram()
1981 cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, in m_can_of_parse_mram()
1982 cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, in m_can_of_parse_mram()
1983 cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num, in m_can_of_parse_mram()
1984 cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num, in m_can_of_parse_mram()
1985 cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); in m_can_of_parse_mram()
1996 start = cdev->mcfg[MRAM_SIDF].off; in m_can_init_ram()
1997 end = cdev->mcfg[MRAM_TXB].off + in m_can_init_ram()
1998 cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; in m_can_init_ram()
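The message RAM layout above is purely cumulative: each region starts where the previous one ends, offset plus element-count times element-size. A self-contained sketch of the same arithmetic, with a hypothetical bosch,mram-cfg value and the element sizes assumed to be the usual 4/8/72/72/72/8/72 bytes (72 = 8-byte header + 64-byte CAN FD payload):

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical bosch,mram-cfg: base offset followed by 7 element counts */
                unsigned int cfg[8] = { 0x0, 0, 0, 32, 0, 0, 1, 1 };
                /* assumed element sizes: SIDF, XIDF, RXF0, RXF1, RXB, TXE, TXB */
                unsigned int size[7] = { 4, 8, 72, 72, 72, 8, 72 };
                const char *name[7] = { "SIDF", "XIDF", "RXF0", "RXF1", "RXB", "TXE", "TXB" };
                unsigned int off = cfg[0];
                int i;

                for (i = 0; i < 7; i++) {
                        printf("%-4s off=0x%04x num=%u\n", name[i], off, cfg[i + 1]);
                        off += cfg[i + 1] * size[i];
                }
                /* this total is what m_can_check_mram_cfg compares against the mram size */
                printf("total %u bytes\n", off - cfg[0]);
                return 0;
        }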
2014 cdev->hclk = devm_clk_get(cdev->dev, "hclk"); in m_can_class_get_clocks()
2015 cdev->cclk = devm_clk_get(cdev->dev, "cclk"); in m_can_class_get_clocks()
2017 if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) { in m_can_class_get_clocks()
2018 dev_err(cdev->dev, "no clock found\n"); in m_can_class_get_clocks()
2019 ret = -ENODEV; in m_can_class_get_clocks()
2036 "bosch,mram-cfg", in m_can_class_allocate_dev()
2057 class_dev->net = net_dev; in m_can_class_allocate_dev()
2058 class_dev->dev = dev; in m_can_class_allocate_dev()
2077 if (cdev->pm_clock_support) { in m_can_class_register()
2083 if (cdev->is_peripheral) { in m_can_class_register()
2084 ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, in m_can_class_register()
2090 if (!cdev->net->irq) in m_can_class_register()
2091 cdev->hrtimer.function = &hrtimer_callback; in m_can_class_register()
2097 ret = register_m_can_dev(cdev->net); in m_can_class_register()
2099 dev_err(cdev->dev, "registering %s failed (err=%d)\n", in m_can_class_register()
2100 cdev->net->name, ret); in m_can_class_register()
2104 of_can_transceiver(cdev->net); in m_can_class_register()
2106 dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n", in m_can_class_register()
2107 KBUILD_MODNAME, cdev->net->irq, cdev->version); in m_can_class_register()
2117 if (cdev->is_peripheral) in m_can_class_register()
2118 can_rx_offload_del(&cdev->offload); in m_can_class_register()
2128 if (cdev->is_peripheral) in m_can_class_unregister()
2129 can_rx_offload_del(&cdev->offload); in m_can_class_unregister()
2130 unregister_candev(cdev->net); in m_can_class_unregister()
2137 struct net_device *ndev = cdev->net; in m_can_class_suspend()
2148 cdev->can.state = CAN_STATE_SLEEPING; in m_can_class_suspend()
2157 struct net_device *ndev = cdev->net; in m_can_class_resume()
2161 cdev->can.state = CAN_STATE_ERROR_ACTIVE; in m_can_class_resume()