Lines matching refs: h5 (drivers/bluetooth/hci_h5.c, the Bluetooth three-wire UART driver)

63 struct h5 {  struct
110 int (*setup)(struct h5 *h5); argument
111 void (*open)(struct h5 *h5);
112 void (*close)(struct h5 *h5);
113 int (*suspend)(struct h5 *h5);
114 int (*resume)(struct h5 *h5);
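The struct h5 lines above include a table of five optional vendor hooks. The h5_setup, h5_serdev_suspend, and h5_serdev_resume entries further down all dispatch through it with the same NULL guard. A stand-alone sketch of the pattern (model types, not the driver source):

```c
/* Sketch of the optional vendor-hook dispatch seen in this listing:
 * every hook may be absent, so call sites guard on both the table
 * pointer and the member before calling (compare lines 279-280). */
struct h5;

struct h5_vnd {
	int  (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	int  (*suspend)(struct h5 *h5);
	int  (*resume)(struct h5 *h5);
};

struct h5 {
	const struct h5_vnd *vnd; /* NULL when no vendor glue is bound */
};

static int h5_setup_sketch(struct h5 *h5)
{
	if (h5->vnd && h5->vnd->setup)
		return h5->vnd->setup(h5);
	return 0;
}
```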
124 static void h5_reset_rx(struct h5 *h5);
128 struct h5 *h5 = hu->priv; in h5_link_control() local
139 skb_queue_tail(&h5->unrel, nskb); in h5_link_control()
142 static u8 h5_cfg_field(struct h5 *h5) in h5_cfg_field() argument
145 return h5->tx_win & 0x07; in h5_cfg_field()
152 struct h5 *h5 = from_timer(h5, t, timer); in h5_timed_event() local
153 struct hci_uart *hu = h5->hu; in h5_timed_event()
159 if (h5->state == H5_UNINITIALIZED) in h5_timed_event()
162 if (h5->state == H5_INITIALIZED) { in h5_timed_event()
163 conf_req[2] = h5_cfg_field(h5); in h5_timed_event()
167 if (h5->state != H5_ACTIVE) { in h5_timed_event()
168 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); in h5_timed_event()
172 if (h5->sleep != H5_AWAKE) { in h5_timed_event()
173 h5->sleep = H5_SLEEPING; in h5_timed_event()
177 BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen); in h5_timed_event()
179 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); in h5_timed_event()
181 while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) { in h5_timed_event()
182 h5->tx_seq = (h5->tx_seq - 1) & 0x07; in h5_timed_event()
183 skb_queue_head(&h5->rel, skb); in h5_timed_event()
186 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_timed_event()
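The timer path above retransmits by draining the unack queue tail-first back onto the head of the reliable queue, winding tx_seq back one 3-bit slot per packet. A runnable user-space model of the wind-back (example values are made up):

```c
#include <stdio.h>

/* Model of lines 181-183: each unacked packet that is requeued winds
 * tx_seq back one step in the 3-bit sequence space; "& 0x07" makes the
 * subtraction wrap cleanly below zero. Example: packets with seq 6, 7,
 * and 0 are in flight, so tx_seq is currently 1. */
int main(void)
{
	unsigned int tx_seq = 1;
	unsigned int unacked = 3;

	while (unacked--) {
		tx_seq = (tx_seq - 1) & 0x07;
		printf("requeued one packet, tx_seq back to %u\n", tx_seq);
	}
	/* prints 0, 7, 6: the exact seqs of the requeued packets */
	return 0;
}
```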
194 struct h5 *h5 = hu->priv; in h5_peer_reset() local
198 h5->state = H5_UNINITIALIZED; in h5_peer_reset()
200 del_timer(&h5->timer); in h5_peer_reset()
202 skb_queue_purge(&h5->rel); in h5_peer_reset()
203 skb_queue_purge(&h5->unrel); in h5_peer_reset()
204 skb_queue_purge(&h5->unack); in h5_peer_reset()
206 h5->tx_seq = 0; in h5_peer_reset()
207 h5->tx_ack = 0; in h5_peer_reset()
215 struct h5 *h5; in h5_open() local
221 h5 = serdev_device_get_drvdata(hu->serdev); in h5_open()
223 h5 = kzalloc(sizeof(*h5), GFP_KERNEL); in h5_open()
224 if (!h5) in h5_open()
228 hu->priv = h5; in h5_open()
229 h5->hu = hu; in h5_open()
231 skb_queue_head_init(&h5->unack); in h5_open()
232 skb_queue_head_init(&h5->rel); in h5_open()
233 skb_queue_head_init(&h5->unrel); in h5_open()
235 h5_reset_rx(h5); in h5_open()
237 timer_setup(&h5->timer, h5_timed_event, 0); in h5_open()
239 h5->tx_win = H5_TX_WIN_MAX; in h5_open()
241 if (h5->vnd && h5->vnd->open) in h5_open()
242 h5->vnd->open(h5); in h5_open()
248 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); in h5_open()
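h5_open builds the three packet queues, resets the rx state machine, registers the sync timer, and arms it; the callback at line 152 recovers its struct h5 via from_timer(), the standard container-recovery helper for embedded timer_list members. A kernel-style fragment of that idiom (illustrative only, not buildable outside a kernel tree):

```c
#include <linux/timer.h>

struct h5_sketch {
	struct timer_list timer;
};

static void h5_timed_event_sketch(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its container */
	struct h5_sketch *h5 = from_timer(h5, t, timer);

	/* ... state-dependent work, then re-arm while still syncing: */
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
}

static void h5_open_sketch(struct h5_sketch *h5)
{
	timer_setup(&h5->timer, h5_timed_event_sketch, 0);
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
}
```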
255 struct h5 *h5 = hu->priv; in h5_close() local
257 del_timer_sync(&h5->timer); in h5_close()
259 skb_queue_purge(&h5->unack); in h5_close()
260 skb_queue_purge(&h5->rel); in h5_close()
261 skb_queue_purge(&h5->unrel); in h5_close()
263 kfree_skb(h5->rx_skb); in h5_close()
264 h5->rx_skb = NULL; in h5_close()
266 if (h5->vnd && h5->vnd->close) in h5_close()
267 h5->vnd->close(h5); in h5_close()
270 kfree(h5); in h5_close()
277 struct h5 *h5 = hu->priv; in h5_setup() local
279 if (h5->vnd && h5->vnd->setup) in h5_setup()
280 return h5->vnd->setup(h5); in h5_setup()
285 static void h5_pkt_cull(struct h5 *h5) in h5_pkt_cull() argument
292 spin_lock_irqsave(&h5->unack.lock, flags); in h5_pkt_cull()
294 to_remove = skb_queue_len(&h5->unack); in h5_pkt_cull()
298 seq = h5->tx_seq; in h5_pkt_cull()
301 if (h5->rx_ack == seq) in h5_pkt_cull()
308 if (seq != h5->rx_ack) in h5_pkt_cull()
312 skb_queue_walk_safe(&h5->unack, skb, tmp) { in h5_pkt_cull()
316 __skb_unlink(skb, &h5->unack); in h5_pkt_cull()
320 if (skb_queue_empty(&h5->unack)) in h5_pkt_cull()
321 del_timer(&h5->timer); in h5_pkt_cull()
324 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_pkt_cull()
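h5_pkt_cull converts the peer's cumulative ack into a number of queue entries to drop: it walks the 3-bit sequence space backwards from tx_seq until it meets rx_ack, then unlinks that many of the oldest packets and stops the timer once the queue is empty. The decrement step between lines 301 and 308 is not shown in the listing, but the model below assumes the same (seq - 1) & 0x07 arithmetic the retransmit path uses:

```c
#include <stdio.h>

/* User-space model of the cull arithmetic around lines 294-308: the
 * distance left after walking back from tx_seq to rx_ack is the number
 * of oldest packets the ack covers. Values below are made-up examples. */
static unsigned int cull_count(unsigned int tx_seq, unsigned int rx_ack,
			       unsigned int qlen)
{
	unsigned int to_remove = qlen, seq = tx_seq;

	while (to_remove > 0 && seq != rx_ack) {
		to_remove--;
		seq = (seq - 1) & 0x07;
	}
	return seq == rx_ack ? to_remove : 0; /* 0: ack outside the window */
}

int main(void)
{
	/* 4 packets in flight with seq 5, 6, 7, 0 (tx_seq now 1); the
	 * peer acks up to but not including seq 7, so 2 are culled */
	printf("%u\n", cull_count(1, 7, 4)); /* prints 2 */
	return 0;
}
```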
329 struct h5 *h5 = hu->priv; in h5_handle_internal_rx() local
337 const unsigned char *hdr = h5->rx_skb->data; in h5_handle_internal_rx()
338 const unsigned char *data = &h5->rx_skb->data[4]; in h5_handle_internal_rx()
348 conf_req[2] = h5_cfg_field(h5); in h5_handle_internal_rx()
351 if (h5->state == H5_ACTIVE) in h5_handle_internal_rx()
355 if (h5->state == H5_ACTIVE) in h5_handle_internal_rx()
357 h5->state = H5_INITIALIZED; in h5_handle_internal_rx()
364 h5->tx_win = (data[2] & 0x07); in h5_handle_internal_rx()
365 BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win); in h5_handle_internal_rx()
366 h5->state = H5_ACTIVE; in h5_handle_internal_rx()
371 h5->sleep = H5_SLEEPING; in h5_handle_internal_rx()
375 h5->sleep = H5_AWAKE; in h5_handle_internal_rx()
379 h5->sleep = H5_AWAKE; in h5_handle_internal_rx()
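The internal-rx lines trace three-wire link establishment: sync exchanges move the state machine out of H5_UNINITIALIZED, a config response carries the peer's window in the low 3 bits of its payload byte (line 364), and the link then goes H5_ACTIVE. A runnable model of the window negotiation:

```c
#include <stdio.h>

/* Model of the config-field handshake: the local side advertises its
 * window in the low 3 bits (h5_cfg_field, line 145) and adopts the
 * peer's answer from the config-response payload (line 364). */
static unsigned char cfg_field(unsigned char tx_win)
{
	return tx_win & 0x07; /* window size fits in 3 bits */
}

int main(void)
{
	unsigned char conf_rsp_byte = cfg_field(4); /* peer offers win 4 */
	unsigned char tx_win = conf_rsp_byte & 0x07;

	printf("negotiated tx_win %u\n", tx_win); /* prints 4 */
	return 0;
}
```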
390 struct h5 *h5 = hu->priv; in h5_complete_rx_pkt() local
391 const unsigned char *hdr = h5->rx_skb->data; in h5_complete_rx_pkt()
394 h5->tx_ack = (h5->tx_ack + 1) % 8; in h5_complete_rx_pkt()
395 set_bit(H5_TX_ACK_REQ, &h5->flags); in h5_complete_rx_pkt()
399 h5->rx_ack = H5_HDR_ACK(hdr); in h5_complete_rx_pkt()
401 h5_pkt_cull(h5); in h5_complete_rx_pkt()
408 hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr); in h5_complete_rx_pkt()
411 skb_pull(h5->rx_skb, 4); in h5_complete_rx_pkt()
413 hci_recv_frame(hu->hdev, h5->rx_skb); in h5_complete_rx_pkt()
414 h5->rx_skb = NULL; in h5_complete_rx_pkt()
423 h5_reset_rx(h5); in h5_complete_rx_pkt()
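On every reliable packet the receiver advances tx_ack modulo 8 and latches H5_TX_ACK_REQ so the next outbound header carries the new ack. The seq/ack placement is confirmed by the packing at lines 710 and 716 (ack in bits 3-5, seq in bits 0-2 of header byte 0); the reliable flag in bit 7 is an assumption from the three-wire framing, not visible in the listing. A runnable round-trip model:

```c
#include <stdio.h>

#define HDR_SEQ(b0)      ((b0) & 0x07)
#define HDR_ACK(b0)      (((b0) >> 3) & 0x07)
#define HDR_RELIABLE(b0) (((b0) >> 7) & 0x01) /* assumed bit position */

int main(void)
{
	unsigned int tx_seq = 5, tx_ack = 2;
	unsigned char b0;

	/* pack, mirroring lines 710 and 716 */
	b0 = tx_ack << 3;
	b0 |= tx_seq;
	b0 |= 0x80; /* mark reliable (assumed flag bit) */

	/* unpack on the receive side */
	printf("seq %u ack %u reliable %u\n",
	       HDR_SEQ(b0), HDR_ACK(b0), HDR_RELIABLE(b0)); /* 5 2 1 */

	/* receiver bookkeeping for a reliable packet (line 394) */
	tx_ack = (tx_ack + 1) % 8;
	printf("next outbound ack field: %u\n", tx_ack); /* 3 */
	return 0;
}
```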
435 struct h5 *h5 = hu->priv; in h5_rx_payload() local
436 const unsigned char *hdr = h5->rx_skb->data; in h5_rx_payload()
439 h5->rx_func = h5_rx_crc; in h5_rx_payload()
440 h5->rx_pending = 2; in h5_rx_payload()
450 struct h5 *h5 = hu->priv; in h5_rx_3wire_hdr() local
451 const unsigned char *hdr = h5->rx_skb->data; in h5_rx_3wire_hdr()
460 h5_reset_rx(h5); in h5_rx_3wire_hdr()
464 if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) { in h5_rx_3wire_hdr()
466 H5_HDR_SEQ(hdr), h5->tx_ack); in h5_rx_3wire_hdr()
467 set_bit(H5_TX_ACK_REQ, &h5->flags); in h5_rx_3wire_hdr()
469 h5_reset_rx(h5); in h5_rx_3wire_hdr()
473 if (h5->state != H5_ACTIVE && in h5_rx_3wire_hdr()
476 h5_reset_rx(h5); in h5_rx_3wire_hdr()
480 h5->rx_func = h5_rx_payload; in h5_rx_3wire_hdr()
481 h5->rx_pending = H5_HDR_LEN(hdr); in h5_rx_3wire_hdr()
488 struct h5 *h5 = hu->priv; in h5_rx_pkt_start() local
493 h5->rx_func = h5_rx_3wire_hdr; in h5_rx_pkt_start()
494 h5->rx_pending = 4; in h5_rx_pkt_start()
496 h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC); in h5_rx_pkt_start()
497 if (!h5->rx_skb) { in h5_rx_pkt_start()
499 h5_reset_rx(h5); in h5_rx_pkt_start()
503 h5->rx_skb->dev = (void *)hu->hdev; in h5_rx_pkt_start()
510 struct h5 *h5 = hu->priv; in h5_rx_delimiter() local
513 h5->rx_func = h5_rx_pkt_start; in h5_rx_delimiter()
518 static void h5_unslip_one_byte(struct h5 *h5, unsigned char c) in h5_unslip_one_byte() argument
523 if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) { in h5_unslip_one_byte()
524 set_bit(H5_RX_ESC, &h5->flags); in h5_unslip_one_byte()
528 if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) { in h5_unslip_one_byte()
538 h5_reset_rx(h5); in h5_unslip_one_byte()
543 skb_put_data(h5->rx_skb, byte, 1); in h5_unslip_one_byte()
544 h5->rx_pending--; in h5_unslip_one_byte()
546 BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending); in h5_unslip_one_byte()
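h5_unslip_one_byte implements the receive half of SLIP framing: an ESC byte sets a flag, the following byte selects the decoded value, and anything else is stored as-is; an invalid escape resets the rx machine (line 538). A runnable model, with the 0xc0/0xdb/0xdc/0xdd values being the usual SLIP constants, assumed here:

```c
#include <stdio.h>

#define SLIP_DELIMITER 0xc0
#define SLIP_ESC       0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC   0xdd

/* Feed one wire byte; returns decoded byte count (0 while escaping,
 * -1 on an invalid escape). */
static int unslip_byte(unsigned char c, int *esc, unsigned char *out)
{
	if (!*esc && c == SLIP_ESC) {
		*esc = 1;
		return 0;
	}
	if (*esc) {
		*esc = 0;
		if (c == SLIP_ESC_DELIM)
			*out = SLIP_DELIMITER;
		else if (c == SLIP_ESC_ESC)
			*out = SLIP_ESC;
		else
			return -1; /* invalid escape: reset rx */
		return 1;
	}
	*out = c;
	return 1;
}

int main(void)
{
	const unsigned char wire[] = { 0x01, SLIP_ESC, SLIP_ESC_DELIM, 0x02 };
	int esc = 0;
	unsigned char b;

	for (unsigned int i = 0; i < sizeof(wire); i++)
		if (unslip_byte(wire[i], &esc, &b) == 1)
			printf("0x%02x ", b); /* 0x01 0xc0 0x02 */
	printf("\n");
	return 0;
}
```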
549 static void h5_reset_rx(struct h5 *h5) in h5_reset_rx() argument
551 if (h5->rx_skb) { in h5_reset_rx()
552 kfree_skb(h5->rx_skb); in h5_reset_rx()
553 h5->rx_skb = NULL; in h5_reset_rx()
556 h5->rx_func = h5_rx_delimiter; in h5_reset_rx()
557 h5->rx_pending = 0; in h5_reset_rx()
558 clear_bit(H5_RX_ESC, &h5->flags); in h5_reset_rx()
563 struct h5 *h5 = hu->priv; in h5_recv() local
566 BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending, in h5_recv()
572 if (h5->rx_pending > 0) { in h5_recv()
575 h5_reset_rx(h5); in h5_recv()
579 h5_unslip_one_byte(h5, *ptr); in h5_recv()
585 processed = h5->rx_func(hu, *ptr); in h5_recv()
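Together, h5_rx_delimiter, h5_rx_pkt_start, h5_rx_3wire_hdr, h5_rx_payload, and h5_reset_rx form a function-pointer state machine that h5_recv drives one byte at a time: rx_pending routes raw bytes to the un-SLIP path, and each stage returns how many bytes it consumed, so returning 0 after setting rx_pending hands the same byte back to the un-SLIP path. A stand-alone model with SLIP decoding and the skb buffer elided:

```c
#include <stdio.h>

struct rx;
typedef int (*rx_fn)(struct rx *rx, unsigned char c);

struct rx {
	rx_fn    rx_func;
	unsigned rx_pending; /* bytes still owed to the current stage */
};

static int rx_delimiter(struct rx *rx, unsigned char c);

static int rx_payload(struct rx *rx, unsigned char c)
{
	printf("packet complete\n"); /* h5_complete_rx_pkt() upstream */
	rx->rx_func = rx_delimiter;  /* h5_reset_rx() upstream */
	return 0;                    /* c is the closing delimiter */
}

static int rx_3wire_hdr(struct rx *rx, unsigned char c)
{
	rx->rx_func = rx_payload;    /* seq/len checks elided */
	rx->rx_pending = 2;          /* H5_HDR_LEN(hdr) upstream */
	return 0;                    /* c is the first payload byte */
}

static int rx_pkt_start(struct rx *rx, unsigned char c)
{
	rx->rx_func = rx_3wire_hdr;
	rx->rx_pending = 4;          /* fixed 4-byte three-wire header */
	return 0;                    /* c is the first header byte */
}

static int rx_delimiter(struct rx *rx, unsigned char c)
{
	if (c == 0xc0)               /* SLIP_DELIMITER */
		rx->rx_func = rx_pkt_start;
	return 1;
}

static void recv_bytes(struct rx *rx, const unsigned char *p, unsigned n)
{
	while (n > 0) {
		if (rx->rx_pending > 0) {
			rx->rx_pending--; /* h5_unslip_one_byte() upstream */
			p++; n--;
			continue;
		}
		int used = rx->rx_func(rx, *p);
		p += used; n -= used;
	}
}

int main(void)
{
	/* delimiter, 4 header bytes, 2 payload bytes, closing delimiter */
	const unsigned char frame[] = { 0xc0, 0, 0, 0, 0, 7, 8, 0xc0 };
	struct rx rx = { rx_delimiter, 0 };

	recv_bytes(&rx, frame, sizeof(frame)); /* prints once */
	return 0;
}
```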
604 struct h5 *h5 = hu->priv; in h5_enqueue() local
612 if (h5->state != H5_ACTIVE) { in h5_enqueue()
621 skb_queue_tail(&h5->rel, skb); in h5_enqueue()
626 skb_queue_tail(&h5->unrel, skb); in h5_enqueue()
686 struct h5 *h5 = hu->priv; in h5_prepare_pkt() local
710 hdr[0] = h5->tx_ack << 3; in h5_prepare_pkt()
711 clear_bit(H5_TX_ACK_REQ, &h5->flags); in h5_prepare_pkt()
716 hdr[0] |= h5->tx_seq; in h5_prepare_pkt()
717 h5->tx_seq = (h5->tx_seq + 1) % 8; in h5_prepare_pkt()
742 struct h5 *h5 = hu->priv; in h5_dequeue() local
746 if (h5->sleep != H5_AWAKE) { in h5_dequeue()
749 if (h5->sleep == H5_WAKING_UP) in h5_dequeue()
752 h5->sleep = H5_WAKING_UP; in h5_dequeue()
755 mod_timer(&h5->timer, jiffies + HZ / 100); in h5_dequeue()
759 skb = skb_dequeue(&h5->unrel); in h5_dequeue()
768 skb_queue_head(&h5->unrel, skb); in h5_dequeue()
772 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); in h5_dequeue()
774 if (h5->unack.qlen >= h5->tx_win) in h5_dequeue()
777 skb = skb_dequeue(&h5->rel); in h5_dequeue()
782 __skb_queue_tail(&h5->unack, skb); in h5_dequeue()
783 mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT); in h5_dequeue()
784 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_dequeue()
788 skb_queue_head(&h5->rel, skb); in h5_dequeue()
793 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_dequeue()
795 if (test_bit(H5_TX_ACK_REQ, &h5->flags)) in h5_dequeue()
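On the tx side, h5_enqueue refuses traffic until the link is H5_ACTIVE and splits packets between the rel and unrel queues; h5_dequeue then drains unrel first, sends rel packets only while the unack queue is below the negotiated tx_win (moving them to unack and arming the ack timer), and otherwise emits a bare ack frame when H5_TX_ACK_REQ is set. A user-space model of just the selection order (queues reduced to counters, sleep handling and locking elided):

```c
#include <stdio.h>

/* Selection order of h5_dequeue (lines 759-795). */
enum pick { PICK_UNREL, PICK_REL, PICK_ACK, PICK_NONE };

static enum pick dequeue_model(unsigned unrel, unsigned rel,
			       unsigned unack, unsigned tx_win,
			       int ack_req)
{
	if (unrel > 0)
		return PICK_UNREL;     /* unreliable traffic first */
	if (unack < tx_win && rel > 0)
		return PICK_REL;       /* reliable, window permitting */
	if (ack_req)
		return PICK_ACK;       /* bare ack frame */
	return PICK_NONE;
}

int main(void)
{
	/* window full (4 in flight, tx_win 4): reliable tx stalls,
	 * but a pending ack still goes out */
	printf("%d\n", dequeue_model(0, 2, 4, 4, 1)); /* PICK_ACK (2) */
	return 0;
}
```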
822 struct h5 *h5; in h5_serdev_probe() local
825 h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); in h5_serdev_probe()
826 if (!h5) in h5_serdev_probe()
829 h5->hu = &h5->serdev_hu; in h5_serdev_probe()
830 h5->serdev_hu.serdev = serdev; in h5_serdev_probe()
831 serdev_device_set_drvdata(serdev, h5); in h5_serdev_probe()
841 h5->vnd = data->vnd; in h5_serdev_probe()
842 h5->id = (char *)match->id; in h5_serdev_probe()
844 if (h5->vnd->acpi_gpio_map) in h5_serdev_probe()
846 h5->vnd->acpi_gpio_map); in h5_serdev_probe()
852 h5->vnd = data->vnd; in h5_serdev_probe()
856 set_bit(H5_WAKEUP_DISABLE, &h5->flags); in h5_serdev_probe()
858 h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); in h5_serdev_probe()
859 if (IS_ERR(h5->enable_gpio)) in h5_serdev_probe()
860 return PTR_ERR(h5->enable_gpio); in h5_serdev_probe()
862 h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake", in h5_serdev_probe()
864 if (IS_ERR(h5->device_wake_gpio)) in h5_serdev_probe()
865 return PTR_ERR(h5->device_wake_gpio); in h5_serdev_probe()
867 return hci_uart_register_device_priv(&h5->serdev_hu, &h5p, in h5_serdev_probe()
868 h5->vnd->sizeof_priv); in h5_serdev_probe()
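The probe acquires both GPIOs with devm_gpiod_get_optional(), which returns NULL when the line simply is not there and an ERR_PTR on real errors, so only IS_ERR() needs checking and later gpiod_set_value_cansleep() calls tolerate the NULL. A kernel-style fragment of the pattern (GPIOD_OUT_LOW for the enable line matches line 858; the device-wake flags are cut off in the listing and assumed here):

```c
#include <linux/gpio/consumer.h>

/* Kernel-style fragment of the optional-GPIO pattern at lines 858-865. */
static int probe_gpios_sketch(struct device *dev,
			      struct gpio_desc **enable,
			      struct gpio_desc **wake)
{
	*enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(*enable))
		return PTR_ERR(*enable);

	*wake = devm_gpiod_get_optional(dev, "device-wake", GPIOD_OUT_LOW);
	if (IS_ERR(*wake))
		return PTR_ERR(*wake);

	return 0; /* gpiod_set_value_cansleep(NULL, v) is a safe no-op */
}
```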
873 struct h5 *h5 = serdev_device_get_drvdata(serdev); in h5_serdev_remove() local
875 hci_uart_unregister_device(&h5->serdev_hu); in h5_serdev_remove()
880 struct h5 *h5 = dev_get_drvdata(dev); in h5_serdev_suspend() local
883 if (h5->vnd && h5->vnd->suspend) in h5_serdev_suspend()
884 ret = h5->vnd->suspend(h5); in h5_serdev_suspend()
891 struct h5 *h5 = dev_get_drvdata(dev); in h5_serdev_resume() local
894 if (h5->vnd && h5->vnd->resume) in h5_serdev_resume()
895 ret = h5->vnd->resume(h5); in h5_serdev_resume()
901 static int h5_btrtl_setup(struct h5 *h5) in h5_btrtl_setup() argument
911 btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id); in h5_btrtl_setup()
915 err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev, in h5_btrtl_setup()
922 skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data), in h5_btrtl_setup()
925 rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n"); in h5_btrtl_setup()
934 serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate); in h5_btrtl_setup()
935 serdev_device_set_flow_control(h5->hu->serdev, flow_control); in h5_btrtl_setup()
938 set_bit(H5_HW_FLOW_CONTROL, &h5->flags); in h5_btrtl_setup()
940 err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev); in h5_btrtl_setup()
946 btrtl_set_quirks(h5->hu->hdev, btrtl_dev); in h5_btrtl_setup()
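h5_btrtl_setup follows the usual Realtek bring-up order visible above: identify the controller, ask btrtl for the UART settings, switch the controller's baud rate with a vendor command (opcode 0xfc17), retune the local serdev to match, then download firmware and apply quirks. A condensed kernel-style sketch of that order (declarations and error paths omitted; the btrtl_get_uart_settings argument list is truncated in the listing and partly assumed, as is the baudrate_data payload):

```c
/* Condensed order of h5_btrtl_setup (lines 911-946), sketch only. */
btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);

err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
			      &controller_baudrate, &device_baudrate,
			      &flow_control);

/* tell the controller first, over the old rate... */
skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
		     &baudrate_data, HCI_INIT_TIMEOUT);

/* ...then retune the host uart to match */
serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
serdev_device_set_flow_control(h5->hu->serdev, flow_control);

err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
btrtl_set_quirks(h5->hu->hdev, btrtl_dev);
```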
954 static void h5_btrtl_open(struct h5 *h5) in h5_btrtl_open() argument
961 if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) in h5_btrtl_open()
962 set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags); in h5_btrtl_open()
965 serdev_device_set_flow_control(h5->hu->serdev, false); in h5_btrtl_open()
966 serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); in h5_btrtl_open()
967 serdev_device_set_baudrate(h5->hu->serdev, 115200); in h5_btrtl_open()
969 if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { in h5_btrtl_open()
970 pm_runtime_set_active(&h5->hu->serdev->dev); in h5_btrtl_open()
971 pm_runtime_use_autosuspend(&h5->hu->serdev->dev); in h5_btrtl_open()
972 pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, in h5_btrtl_open()
974 pm_runtime_enable(&h5->hu->serdev->dev); in h5_btrtl_open()
978 gpiod_set_value_cansleep(h5->enable_gpio, 0); in h5_btrtl_open()
979 gpiod_set_value_cansleep(h5->device_wake_gpio, 0); in h5_btrtl_open()
983 gpiod_set_value_cansleep(h5->enable_gpio, 1); in h5_btrtl_open()
984 gpiod_set_value_cansleep(h5->device_wake_gpio, 1); in h5_btrtl_open()
988 static void h5_btrtl_close(struct h5 *h5) in h5_btrtl_close() argument
990 if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) in h5_btrtl_close()
991 pm_runtime_disable(&h5->hu->serdev->dev); in h5_btrtl_close()
993 gpiod_set_value_cansleep(h5->device_wake_gpio, 0); in h5_btrtl_close()
994 gpiod_set_value_cansleep(h5->enable_gpio, 0); in h5_btrtl_close()
1003 static int h5_btrtl_suspend(struct h5 *h5) in h5_btrtl_suspend() argument
1005 serdev_device_set_flow_control(h5->hu->serdev, false); in h5_btrtl_suspend()
1006 gpiod_set_value_cansleep(h5->device_wake_gpio, 0); in h5_btrtl_suspend()
1008 if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) in h5_btrtl_suspend()
1009 gpiod_set_value_cansleep(h5->enable_gpio, 0); in h5_btrtl_suspend()
1034 static int h5_btrtl_resume(struct h5 *h5) in h5_btrtl_resume() argument
1036 if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { in h5_btrtl_resume()
1046 reprobe->dev = get_device(&h5->hu->serdev->dev); in h5_btrtl_resume()
1049 gpiod_set_value_cansleep(h5->device_wake_gpio, 1); in h5_btrtl_resume()
1051 if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags)) in h5_btrtl_resume()
1052 serdev_device_set_flow_control(h5->hu->serdev, true); in h5_btrtl_resume()