1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2023, Linaro Ltd. All rights reserved.
4  */
5 
6 #include <linux/err.h>
7 #include <linux/interrupt.h>
8 #include <linux/kernel.h>
9 #include <linux/mod_devicetable.h>
10 #include <linux/module.h>
11 #include <linux/platform_device.h>
12 #include <linux/regmap.h>
13 #include <linux/regulator/consumer.h>
14 #include <linux/slab.h>
15 #include <linux/usb/pd.h>
16 #include <linux/usb/tcpm.h>
17 #include "qcom_pmic_typec_pdphy.h"
18 
/*
 * Per-interrupt bookkeeping: ties a Linux IRQ number to the driver-internal
 * virtual IRQ id and back to the owning PDPHY instance (used as dev_id in
 * the ISR).
 */
struct pmic_typec_pdphy_irq_data {
	int				virq;	/* driver-internal id (PMIC_PDPHY_*_IRQ) */
	int				irq;	/* Linux IRQ number from platform_get_irq_byname() */
	struct pmic_typec_pdphy		*pmic_typec_pdphy;	/* back-pointer to owner */
};
24 
/*
 * Driver state for the PMIC USB Power Delivery PHY (PDPHY) register block.
 */
struct pmic_typec_pdphy {
	struct device			*dev;
	struct tcpm_port		*tcpm_port;	/* set in qcom_pmic_typec_pdphy_start() */
	struct regmap			*regmap;
	u32				base;		/* base offset of the PDPHY registers */

	unsigned int			nr_irqs;	/* entries in irq_data[] */
	struct pmic_typec_pdphy_irq_data	*irq_data;

	struct work_struct		reset_work;	/* Hard Reset signal handling */
	/* NOTE(review): receive_work is never initialized or queued in this
	 * file — looks vestigial; confirm before removing.
	 */
	struct work_struct		receive_work;
	struct regulator		*vdd_pdphy;	/* PDPHY supply */
	spinlock_t			lock;		/* Register atomicity */
};
39 
40 static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
41 {
42 	struct device *dev = pmic_typec_pdphy->dev;
43 	int ret;
44 
45 	/* Terminate TX */
46 	ret = regmap_write(pmic_typec_pdphy->regmap,
47 			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
48 	if (ret)
49 		goto err;
50 
51 	ret = regmap_write(pmic_typec_pdphy->regmap,
52 			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
53 	if (ret)
54 		goto err;
55 
56 	return;
57 err:
58 	dev_err(dev, "pd_reset_on error\n");
59 }
60 
61 static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
62 {
63 	struct device *dev = pmic_typec_pdphy->dev;
64 	int ret;
65 
66 	ret = regmap_write(pmic_typec_pdphy->regmap,
67 			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
68 			   FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
69 	if (ret)
70 		dev_err(dev, "pd_reset_off error\n");
71 }
72 
73 static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
74 {
75 	struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
76 						     reset_work);
77 	unsigned long flags;
78 
79 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
80 
81 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
82 	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
83 
84 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
85 
86 	tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
87 }
88 
89 static int
90 qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
91 {
92 	struct device *dev = pmic_typec_pdphy->dev;
93 	unsigned int val;
94 	int ret;
95 
96 	/* Clear TX control register */
97 	ret = regmap_write(pmic_typec_pdphy->regmap,
98 			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
99 	if (ret)
100 		goto done;
101 
102 	/* Perform readback to ensure sufficient delay for command to latch */
103 	ret = regmap_read(pmic_typec_pdphy->regmap,
104 			  pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);
105 
106 done:
107 	if (ret)
108 		dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");
109 
110 	return ret;
111 }
112 
/*
 * Transmit a PD signal (Hard Reset / Cable Reset ordered set) rather than a
 * message payload. Runs with the register lock held for the whole sequence.
 *
 * Returns 0 on success or a negative regmap error code.
 */
static int
qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
					 enum tcpm_transmit_type type,
					 unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* PD r3.0 negotiates nRetryCount = 2, earlier revisions use 3 */
	val = TX_CONTROL_SEND_SIGNAL;
	if (negotiated_rev == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	/* NOTE(review): frame type 1 presumably selects the reset ordered
	 * set — confirm against the PDPHY register documentation.
	 */
	if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
		val |= TX_CONTROL_FRAME_TYPE(1);

	/* Kick off the transmission */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
		 type, negotiated_rev, ret);

	return ret;
}
150 
/*
 * Transmit a full PD message: header + data objects. The whole register
 * sequence (RX-pending check, buffer fill, size, TX kick) is performed under
 * the register lock.
 *
 * Returns 0 on success, -EBUSY if an unread RX message still owns the
 * hardware buffer, or a negative regmap error code.
 *
 * NOTE(review): negotiated_rev is unused here — the retry count is derived
 * from the revision encoded in msg->header instead; confirm that is the
 * intended behavior.
 */
static int
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
					  enum tcpm_transmit_type type,
					  const struct pd_message *msg,
					  unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val, hdr_len, txbuf_len, txsize_len;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	/* A non-zero RX_ACKNOWLEDGE means hardware still holds an unread
	 * incoming message; transmitting now would race with reception.
	 */
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
			  &val);
	if (ret)
		goto done;

	if (val) {
		dev_err(dev, "pd_transmit_payload: RX message pending\n");
		ret = -EBUSY;
		goto done;
	}

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Payload length = number of 32-bit data objects from the header */
	hdr_len = sizeof(msg->header);
	txbuf_len = pd_header_cnt_le(msg->header) * 4;
	txsize_len = hdr_len + txbuf_len - 1;

	/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
	ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
				pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
				&msg->header, hdr_len);
	if (ret)
		goto done;

	/* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
	if (txbuf_len) {
		ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
					pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
					&msg->payload, txbuf_len);
		if (ret)
			goto done;
	}

	/* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
			   txsize_len);
	if (ret)
		goto done;

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Initiate transmit with retry count as indicated by PD revision */
	val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
	if (pd_header_rev(msg->header) == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (ret) {
		dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
			hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
	}

	return ret;
}
233 
234 int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
235 				      enum tcpm_transmit_type type,
236 				      const struct pd_message *msg,
237 				      unsigned int negotiated_rev)
238 {
239 	struct device *dev = pmic_typec_pdphy->dev;
240 	int ret;
241 
242 	if (msg) {
243 		ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
244 								type, msg,
245 								negotiated_rev);
246 	} else {
247 		ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
248 							       type,
249 							       negotiated_rev);
250 	}
251 
252 	if (ret)
253 		dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);
254 
255 	return ret;
256 }
257 
258 static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
259 {
260 	struct device *dev = pmic_typec_pdphy->dev;
261 	struct pd_message msg;
262 	unsigned int size, rx_status;
263 	unsigned long flags;
264 	int ret;
265 
266 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
267 
268 	ret = regmap_read(pmic_typec_pdphy->regmap,
269 			  pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
270 	if (ret)
271 		goto done;
272 
273 	/* Hardware requires +1 of the real read value to be passed */
274 	if (size < 1 || size > sizeof(msg.payload) + 1) {
275 		dev_dbg(dev, "pd_receive: invalid size %d\n", size);
276 		goto done;
277 	}
278 
279 	size += 1;
280 	ret = regmap_read(pmic_typec_pdphy->regmap,
281 			  pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
282 			  &rx_status);
283 
284 	if (ret)
285 		goto done;
286 
287 	ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
288 			       pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
289 			       (u8 *)&msg, size);
290 	if (ret)
291 		goto done;
292 
293 	/* Return ownership of RX buffer to hardware */
294 	ret = regmap_write(pmic_typec_pdphy->regmap,
295 			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);
296 
297 done:
298 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
299 
300 	if (!ret) {
301 		dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
302 		tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
303 	}
304 }
305 
306 static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
307 {
308 	struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
309 	struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
310 	struct device *dev = pmic_typec_pdphy->dev;
311 
312 	switch (irq_data->virq) {
313 	case PMIC_PDPHY_SIG_TX_IRQ:
314 		dev_err(dev, "isr: tx_sig\n");
315 		break;
316 	case PMIC_PDPHY_SIG_RX_IRQ:
317 		schedule_work(&pmic_typec_pdphy->reset_work);
318 		break;
319 	case PMIC_PDPHY_MSG_TX_IRQ:
320 		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
321 					  TCPC_TX_SUCCESS);
322 		break;
323 	case PMIC_PDPHY_MSG_RX_IRQ:
324 		qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
325 		break;
326 	case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
327 		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
328 					  TCPC_TX_FAILED);
329 		break;
330 	case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
331 		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
332 					  TCPC_TX_DISCARDED);
333 		break;
334 	}
335 
336 	return IRQ_HANDLED;
337 }
338 
339 int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
340 {
341 	unsigned long flags;
342 	int ret;
343 
344 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
345 
346 	ret = regmap_write(pmic_typec_pdphy->regmap,
347 			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);
348 
349 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
350 
351 	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
352 
353 	return ret;
354 }
355 
/*
 * Program the port data/power role bits used in transmitted message headers.
 *
 * NOTE(review): the bare << 3 / << 2 shifts are presumed to line up with
 * MSG_CONFIG_PORT_DATA_ROLE / MSG_CONFIG_PORT_POWER_ROLE — confirm against
 * the register definitions in qcom_pmic_typec_pdphy.h (FIELD_PREP would make
 * this self-checking).
 */
int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
				    bool data_role_host, bool power_role_src)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_PORT_DATA_ROLE |
				 MSG_CONFIG_PORT_POWER_ROLE,
				 data_role_host << 3 | power_role_src << 2);

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
		data_role_host, power_role_src);

	return ret;
}
378 
/*
 * Power up and initialise the PDPHY block: enable the supply, default the
 * message config to PD 2.0 sink/device, toggle the enable control, and
 * release the PHY from reset. On any register failure the supply is
 * released again and the error logged.
 */
static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
	if (ret)
		return ret;

	/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
	if (ret)
		goto done;

	/* Toggle EN_CONTROL: clear first, then set CONTROL_ENABLE */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
			   CONTROL_ENABLE);
	if (ret)
		goto done;

	/* Allow SOP/Hard Reset frames through the frame filter */
	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
	if (ret) {
		regulator_disable(pmic_typec_pdphy->vdd_pdphy);
		dev_err(dev, "pdphy_enable fail %d\n", ret);
	}

	return ret;
}
415 
416 static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
417 {
418 	int ret;
419 
420 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
421 
422 	ret = regmap_write(pmic_typec_pdphy->regmap,
423 			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
424 
425 	regulator_disable(pmic_typec_pdphy->vdd_pdphy);
426 
427 	return ret;
428 }
429 
/*
 * Full PHY reset: disable, let the hardware settle briefly, re-enable.
 */
static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	int ret;

	ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
	if (ret)
		return ret;

	usleep_range(400, 500);

	return qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
}
443 
444 int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
445 				struct tcpm_port *tcpm_port)
446 {
447 	int i;
448 	int ret;
449 
450 	pmic_typec_pdphy->tcpm_port = tcpm_port;
451 
452 	ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
453 	if (ret)
454 		return ret;
455 
456 	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
457 		enable_irq(pmic_typec_pdphy->irq_data[i].irq);
458 
459 	return 0;
460 }
461 
462 void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
463 {
464 	int i;
465 
466 	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
467 		disable_irq(pmic_typec_pdphy->irq_data[i].irq);
468 
469 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
470 }
471 
472 struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
473 {
474 	return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
475 }
476 
477 int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
478 				struct pmic_typec_pdphy *pmic_typec_pdphy,
479 				struct pmic_typec_pdphy_resources *res,
480 				struct regmap *regmap,
481 				u32 base)
482 {
483 	struct device *dev = &pdev->dev;
484 	struct pmic_typec_pdphy_irq_data *irq_data;
485 	int i, ret, irq;
486 
487 	if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
488 		return -EINVAL;
489 
490 	irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
491 				GFP_KERNEL);
492 	if (!irq_data)
493 		return -ENOMEM;
494 
495 	pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
496 	if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
497 		return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);
498 
499 	pmic_typec_pdphy->dev = dev;
500 	pmic_typec_pdphy->base = base;
501 	pmic_typec_pdphy->regmap = regmap;
502 	pmic_typec_pdphy->nr_irqs = res->nr_irqs;
503 	pmic_typec_pdphy->irq_data = irq_data;
504 	spin_lock_init(&pmic_typec_pdphy->lock);
505 	INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);
506 
507 	for (i = 0; i < res->nr_irqs; i++, irq_data++) {
508 		irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
509 		if (irq < 0)
510 			return irq;
511 
512 		irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
513 		irq_data->irq = irq;
514 		irq_data->virq = res->irq_params[i].virq;
515 
516 		ret = devm_request_threaded_irq(dev, irq, NULL,
517 						qcom_pmic_typec_pdphy_isr,
518 						IRQF_ONESHOT | IRQF_NO_AUTOEN,
519 						res->irq_params[i].irq_name,
520 						irq_data);
521 		if (ret)
522 			return ret;
523 	}
524 
525 	return 0;
526 }
527