// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2017 Google, Inc
 *
 * USB Type-C Port Controller Interface.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>

#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
#define VSINKPD_MIN_IR_DROP_MV 750
#define VSRC_NEW_MIN_PERCENT 95
#define VSRC_VALID_MIN_MV 500
#define VPPS_NEW_MIN_PERCENT 95
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
#define VPPS_SHUTDOWN_MIN_PERCENT 85

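/*
 * Per-port driver state. The tcpci_data holds the regmap plus optional
 * vendor hooks and is either filled in locally by tcpci_probe() or supplied
 * by a vendor-specific glue driver through tcpci_register_port().
 */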
struct tcpci {
	struct device *dev;

	struct tcpm_port *port;

	struct regmap *regmap;
	unsigned int alert_mask;

	bool controls_vbus;

	struct tcpc_dev tcpc;
	struct tcpci_data *data;
};

struct tcpci_chip {
	struct tcpci *tcpci;
	struct tcpci_data data;
};

struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci)
{
	return tcpci->port;
}
EXPORT_SYMBOL_GPL(tcpci_get_tcpm_port);

static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
{
	return container_of(tcpc, struct tcpci, tcpc);
}

static int tcpci_read16(struct tcpci *tcpci, unsigned int reg, u16 *val)
{
	return regmap_raw_read(tcpci->regmap, reg, val, sizeof(u16));
}

static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
{
	return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(u16));
}

static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	bool vconn_pres;
	enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
	if (ret < 0)
		return ret;

	vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
	if (vconn_pres) {
		ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
		if (ret < 0)
			return ret;

		if (reg & TCPC_TCPC_CTRL_ORIENTATION)
			polarity = TYPEC_POLARITY_CC2;
	}

	switch (cc) {
	case TYPEC_CC_RA:
		reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	case TYPEC_CC_RD:
		reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	case TYPEC_CC_RP_DEF:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_DEF <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_1_5:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_1_5 <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_3_0:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_3_0 <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_OPEN:
	default:
		reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	}

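	/*
	 * Per TCPC_CTRL.PLUG_ORIENTATION, Vconn is sourced on the CC pin
	 * opposite the active polarity read above; force that pin open so
	 * the requested pull is only applied to the CC line used for the
	 * connection (descriptive note of what the block below does).
	 */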
	if (vconn_pres) {
		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
			reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
		} else {
			reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
			reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
		}
	}

	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return 0;
}

static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
			  enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	/*
	 * APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
	 * disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
	 */
	if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >>
	     TCPC_ROLE_CTRL_CC2_SHIFT) !=
	    ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >>
	     TCPC_ROLE_CTRL_CC1_SHIFT))
		return 0;

	return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ?
				  TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT :
				  TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT,
				  TCPC_ROLE_CTRL_CC_OPEN);
}

static int tcpci_start_toggling(struct tcpc_dev *tcpc,
				enum typec_port_type port_type,
				enum typec_cc_status cc)
{
	int ret;
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg = TCPC_ROLE_CTRL_DRP;

	if (port_type != TYPEC_PORT_DRP)
		return -EOPNOTSUPP;

	/* Handle vendor drp toggling */
	if (tcpci->data->start_drp_toggling) {
		ret = tcpci->data->start_drp_toggling(tcpci, tcpci->data, cc);
		if (ret < 0)
			return ret;
	}

	switch (cc) {
	default:
	case TYPEC_CC_RP_DEF:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_1_5:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_3_0:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	}

	if (cc == TYPEC_CC_RD)
		reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
	else
		reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;
	return regmap_write(tcpci->regmap, TCPC_COMMAND,
			    TCPC_CMD_LOOK4CONNECTION);
}

static int tcpci_get_cc(struct tcpc_dev *tcpc,
			enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg, role_control;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
	if (ret < 0)
		return ret;

	ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
	if (ret < 0)
		return ret;

	*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
				 TCPC_CC_STATUS_CC1_MASK,
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC1));
	*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
				 TCPC_CC_STATUS_CC2_MASK,
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC2));

	return 0;
}

static int tcpci_set_polarity(struct tcpc_dev *tcpc,
			      enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;
	enum typec_cc_status cc1, cc2;

	/* Obtain Rp setting from role control */
	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	ret = tcpci_get_cc(tcpc, &cc1, &cc2);
	if (ret < 0)
		return ret;

	/*
	 * When the port has DRP toggling enabled, ROLE_CONTROL only holds the
	 * initial terminations used for toggling; it does not reflect the
	 * final CC terminations once ConnectionResult is 0, i.e. once DRP
	 * toggling has stopped and the connection is resolved. Infer the port
	 * role from TCPC_CC_STATUS based on the terminations seen, then use
	 * it to program the CC terminations.
	 */
	if (reg & TCPC_ROLE_CTRL_DRP) {
		/* Disable DRP for the OPEN setting to take effect */
		reg = reg & ~TCPC_ROLE_CTRL_DRP;

		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
			/* Local port is source */
			if (cc2 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT;
			else
				reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
		} else {
			reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
			/* Local port is source */
			if (cc1 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT;
			else
				reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
		}
	}

	if (polarity == TYPEC_POLARITY_CC2)
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
	else
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
			    (polarity == TYPEC_POLARITY_CC2) ?
			    TCPC_TCPC_CTRL_ORIENTATION : 0);
}

static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);

	if (tcpci->data->set_partner_usb_comm_capable)
		tcpci->data->set_partner_usb_comm_capable(tcpci, tcpci->data, capable);
}

static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int ret;

	/* Handle vendor set vconn */
	if (tcpci->data->set_vconn) {
		ret = tcpci->data->set_vconn(tcpci, tcpci->data, enable);
		if (ret < 0)
			return ret;
	}

	return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
				  TCPC_POWER_CTRL_VCONN_ENABLE,
				  enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
}

static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	int ret;

	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_AUTO_DISCHARGE,
				 enable ? TCPC_POWER_CTRL_AUTO_DISCHARGE : 0);
	return ret;
}

static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
						   bool pps_active, u32 requested_vbus_voltage_mv,
						   u32 apdo_min_voltage_mv)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	unsigned int pwr_ctrl, threshold = 0;
	int ret;

	/*
	 * A requested voltage of 0 indicates that vbus is going to go away
	 * due to a PR_SWAP, hard reset, etc. Do not discharge vbus here.
	 */
	if (requested_vbus_voltage_mv == 0)
		goto write_thresh;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_CTRL, &pwr_ctrl);
	if (ret < 0)
		return ret;

	if (pwr_ctrl & TCPC_FAST_ROLE_SWAP_EN) {
		/* To prevent disconnect when the source is fast-role-swap capable. */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	} else if (mode == TYPEC_PWR_MODE_PD) {
		if (pps_active)
			/*
			 * To prevent disconnect when the source is in Current Limit Mode.
			 * Set the threshold to the lowest possible voltage vPpsShutdown (min).
			 */
			threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
				    VSINKPD_MIN_IR_DROP_MV;
		else
			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
	} else {
		/* 3.5V for non-pd sink */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	}

	threshold = threshold / TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV;
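	/*
	 * Worked example for a non-PPS 5 V PD contract:
	 * ((95% of 5000 mV) - 750 mV - 500 mV) * 90% = 3150 mV, which becomes
	 * a register value of 126 assuming the usual 25 mV LSB behind
	 * TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV.
	 */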

	if (threshold > TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX)
		return -EINVAL;

write_thresh:
	return tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, threshold);
}

static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	int ret;

	/* To prevent disconnect during FRS, set disconnect threshold to 3.5V */
	ret = tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, enable ? 0 : 0x8c);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_FAST_ROLE_SWAP_EN, enable ?
				 TCPC_FAST_ROLE_SWAP_EN : 0);

	return ret;
}

static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);

	if (tcpci->data->frs_sourcing_vbus)
		tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
}

static void tcpci_check_contaminant(struct tcpc_dev *dev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);

	if (tcpci->data->check_contaminant)
		tcpci->data->check_contaminant(tcpci, tcpci->data);
}

static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);

	return regmap_update_bits(tcpci->regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_BIST_TM,
				  enable ? TCPC_TCPC_CTRL_BIST_TM : 0);
}

static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
			   enum typec_role role, enum typec_data_role data)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

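	/*
	 * MESSAGE_HEADER_INFO tells the TCPC which spec revision, power role
	 * and data role to place in headers it generates on its own (e.g.
	 * GoodCRC); the revision is left at PD_REV20 here.
	 */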
	reg = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT;
	if (role == TYPEC_SOURCE)
		reg |= TCPC_MSG_HDR_INFO_PWR_ROLE;
	if (data == TYPEC_HOST)
		reg |= TCPC_MSG_HDR_INFO_DATA_ROLE;
	ret = regmap_write(tcpci->regmap, TCPC_MSG_HDR_INFO, reg);
	if (ret < 0)
		return ret;

	return 0;
}

static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg = 0;
	int ret;

	if (enable)
		reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
	ret = regmap_write(tcpci->regmap, TCPC_RX_DETECT, reg);
	if (ret < 0)
		return ret;

	return 0;
}

static int tcpci_get_vbus(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
	if (ret < 0)
		return ret;

	return !!(reg & TCPC_POWER_STATUS_VBUS_PRES);
}

static bool tcpci_is_vbus_vsafe0v(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &reg);
	if (ret < 0)
		return false;

	return !!(reg & TCPC_EXTENDED_STATUS_VSAFE0V);
}

static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int ret;

	if (tcpci->data->set_vbus) {
		ret = tcpci->data->set_vbus(tcpci, tcpci->data, source, sink);
		/* Bypass when ret > 0 */
		if (ret != 0)
			return ret < 0 ? ret : 0;
	}

	/* Disable both source and sink first before enabling anything */

	if (!source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SRC_VBUS);
		if (ret < 0)
			return ret;
	}

	if (!sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	if (source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SRC_VBUS_DEFAULT);
		if (ret < 0)
			return ret;
	}

	if (sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type,
			     const struct pd_message *msg, unsigned int negotiated_rev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	u16 header = msg ? le16_to_cpu(msg->header) : 0;
	unsigned int reg, cnt;
	int ret;

	cnt = msg ? pd_header_cnt(header) * 4 : 0;
	/*
	 * TCPCI spec forbids direct access of TCPC_TX_DATA.
	 * But, since some of the chipsets offer this capability,
	 * it's fair to support both.
	 */
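	/*
	 * When the TX_BUF_BYTE_x registers are hidden, pack
	 * TRANSMIT_BYTE_COUNT, the message header and the payload into one
	 * buffer and push it with a single block write starting at
	 * TCPC_TX_BYTE_CNT; otherwise write count, header and data
	 * separately below.
	 */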
	if (tcpci->data->TX_BUF_BYTE_x_hidden) {
		u8 buf[TCPC_TRANSMIT_BUFFER_MAX_LEN] = {0,};
		u8 pos = 0;

		/* Payload + header + TCPC_TX_BYTE_CNT */
		buf[pos++] = cnt + 2;

		if (msg)
			memcpy(&buf[pos], &msg->header, sizeof(msg->header));

		pos += sizeof(header);

		if (cnt > 0)
			memcpy(&buf[pos], msg->payload, cnt);

		pos += cnt;
		ret = regmap_raw_write(tcpci->regmap, TCPC_TX_BYTE_CNT, buf, pos);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
		if (ret < 0)
			return ret;

		ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
		if (ret < 0)
			return ret;

		if (cnt > 0) {
			ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, &msg->payload, cnt);
			if (ret < 0)
				return ret;
		}
	}

	/* nRetryCount is 3 in the PD 2.0 spec and 2 in the PD 3.0 spec */
	reg = ((negotiated_rev > PD_REV20 ? PD_RETRY_COUNT_3_0_OR_HIGHER : PD_RETRY_COUNT_DEFAULT)
	       << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT);
	ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
	if (ret < 0)
		return ret;

	return 0;
}

static int tcpci_init(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned long timeout = jiffies + msecs_to_jiffies(2000); /* XXX */
	unsigned int reg;
	int ret;

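	/*
	 * Wait (up to the 2 s guess above) for the TCPC to clear the
	 * UNINITIALIZED bit in POWER_STATUS, i.e. to finish its power-on
	 * initialization, polling every 10-20 ms.
	 */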
	while (time_before_eq(jiffies, timeout)) {
		ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
		if (ret < 0)
			return ret;
		if (!(reg & TCPC_POWER_STATUS_UNINIT))
			break;
		usleep_range(10000, 20000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
	if (ret < 0)
		return ret;

	/* Handle vendor init */
	if (tcpci->data->init) {
		ret = tcpci->data->init(tcpci, tcpci->data);
		if (ret < 0)
			return ret;
	}

	/* Clear all events */
	ret = tcpci_write16(tcpci, TCPC_ALERT, 0xffff);
	if (ret < 0)
		return ret;

	if (tcpci->controls_vbus)
		reg = TCPC_POWER_STATUS_VBUS_PRES;
	else
		reg = 0;
	ret = regmap_write(tcpci->regmap, TCPC_POWER_STATUS_MASK, reg);
	if (ret < 0)
		return ret;

	/* Enable Vbus detection */
	ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
			   TCPC_CMD_ENABLE_VBUS_DETECT);
	if (ret < 0)
		return ret;

	reg = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_FAILED |
		TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_RX_STATUS |
		TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_CC_STATUS;
	if (tcpci->controls_vbus)
		reg |= TCPC_ALERT_POWER_STATUS;
	/* Enable VSAFE0V status interrupt when detecting VSAFE0V is supported */
	if (tcpci->data->vbus_vsafe0v) {
		reg |= TCPC_ALERT_EXTENDED_STATUS;
		ret = regmap_write(tcpci->regmap, TCPC_EXTENDED_STATUS_MASK,
				   TCPC_EXTENDED_STATUS_VSAFE0V);
		if (ret < 0)
			return ret;
	}

	tcpci->alert_mask = reg;

	return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
}

irqreturn_t tcpci_irq(struct tcpci *tcpci)
{
	u16 status;
	int ret;
	unsigned int raw;

	tcpci_read16(tcpci, TCPC_ALERT, &status);

	/*
	 * Clear the alert status for everything except RX_STATUS, which
	 * shouldn't be cleared until the message has been successfully
	 * retrieved.
	 */
	if (status & ~TCPC_ALERT_RX_STATUS)
		tcpci_write16(tcpci, TCPC_ALERT,
			      status & ~TCPC_ALERT_RX_STATUS);

	if (status & TCPC_ALERT_CC_STATUS)
		tcpm_cc_change(tcpci->port);

	if (status & TCPC_ALERT_POWER_STATUS) {
		regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &raw);
		/*
		 * If power status mask has been reset, then the TCPC
		 * has reset.
		 */
		if (raw == 0xff)
			tcpm_tcpc_reset(tcpci->port);
		else
			tcpm_vbus_change(tcpci->port);
	}

	if (status & TCPC_ALERT_RX_STATUS) {
		struct pd_message msg;
		unsigned int cnt, payload_cnt;
		u16 header;

		regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
		/*
		 * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
		 * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
		 * defined in table 4-36 as one greater than the number of
		 * bytes received. And that number includes the header. So:
		 */
		if (cnt > 3)
			payload_cnt = cnt - (1 + sizeof(msg.header));
		else
			payload_cnt = 0;
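		/*
		 * For example, a data message with two 32-bit objects is
		 * reported as cnt = 1 + 2 (header) + 8 (payload) = 11,
		 * giving payload_cnt = 8.
		 */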

		tcpci_read16(tcpci, TCPC_RX_HDR, &header);
		msg.header = cpu_to_le16(header);

		if (WARN_ON(payload_cnt > sizeof(msg.payload)))
			payload_cnt = sizeof(msg.payload);

		if (payload_cnt > 0)
			regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
					&msg.payload, payload_cnt);

		/* Read complete, clear RX status alert bit */
		tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);

		tcpm_pd_receive(tcpci->port, &msg);
	}

	if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
		ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
		if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
			tcpm_vbus_change(tcpci->port);
	}

	if (status & TCPC_ALERT_RX_HARD_RST)
		tcpm_pd_hard_reset(tcpci->port);

	if (status & TCPC_ALERT_TX_SUCCESS)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_SUCCESS);
	else if (status & TCPC_ALERT_TX_DISCARDED)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_DISCARDED);
	else if (status & TCPC_ALERT_TX_FAILED)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);

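	/* Report IRQ_HANDLED only if at least one unmasked alert bit was set. */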
	return IRQ_RETVAL(status & tcpci->alert_mask);
}
EXPORT_SYMBOL_GPL(tcpci_irq);

static irqreturn_t _tcpci_irq(int irq, void *dev_id)
{
	struct tcpci_chip *chip = dev_id;

	return tcpci_irq(chip->tcpci);
}

static const struct regmap_config tcpci_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
};

static int tcpci_parse_config(struct tcpci *tcpci)
{
	tcpci->controls_vbus = true; /* XXX */

	tcpci->tcpc.fwnode = device_get_named_child_node(tcpci->dev,
							 "connector");
	if (!tcpci->tcpc.fwnode) {
		dev_err(tcpci->dev, "Can't find connector node.\n");
		return -EINVAL;
	}

	return 0;
}

struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
{
	struct tcpci *tcpci;
	int err;

	tcpci = devm_kzalloc(dev, sizeof(*tcpci), GFP_KERNEL);
	if (!tcpci)
		return ERR_PTR(-ENOMEM);

	tcpci->dev = dev;
	tcpci->data = data;
	tcpci->regmap = data->regmap;

	tcpci->tcpc.init = tcpci_init;
	tcpci->tcpc.get_vbus = tcpci_get_vbus;
	tcpci->tcpc.set_vbus = tcpci_set_vbus;
	tcpci->tcpc.set_cc = tcpci_set_cc;
	tcpci->tcpc.apply_rc = tcpci_apply_rc;
	tcpci->tcpc.get_cc = tcpci_get_cc;
	tcpci->tcpc.set_polarity = tcpci_set_polarity;
	tcpci->tcpc.set_vconn = tcpci_set_vconn;
	tcpci->tcpc.start_toggling = tcpci_start_toggling;

	tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
	tcpci->tcpc.set_roles = tcpci_set_roles;
	tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
	tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
	tcpci->tcpc.enable_frs = tcpci_enable_frs;
	tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
	tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;

	if (tcpci->data->check_contaminant)
		tcpci->tcpc.check_contaminant = tcpci_check_contaminant;

	if (tcpci->data->auto_discharge_disconnect) {
		tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
		tcpci->tcpc.set_auto_vbus_discharge_threshold =
			tcpci_set_auto_vbus_discharge_threshold;
		regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_BLEED_DISCHARGE,
				   TCPC_POWER_CTRL_BLEED_DISCHARGE);
	}

	if (tcpci->data->vbus_vsafe0v)
		tcpci->tcpc.is_vbus_vsafe0v = tcpci_is_vbus_vsafe0v;

	err = tcpci_parse_config(tcpci);
	if (err < 0)
		return ERR_PTR(err);

	tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
	if (IS_ERR(tcpci->port)) {
		fwnode_handle_put(tcpci->tcpc.fwnode);
		return ERR_CAST(tcpci->port);
	}

	return tcpci;
}
EXPORT_SYMBOL_GPL(tcpci_register_port);

void tcpci_unregister_port(struct tcpci *tcpci)
{
	tcpm_unregister_port(tcpci->port);
	fwnode_handle_put(tcpci->tcpc.fwnode);
}
EXPORT_SYMBOL_GPL(tcpci_unregister_port);

static int tcpci_probe(struct i2c_client *client)
{
	struct tcpci_chip *chip;
	int err;
	u16 val = 0;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->data.regmap = devm_regmap_init_i2c(client, &tcpci_regmap_config);
	if (IS_ERR(chip->data.regmap))
		return PTR_ERR(chip->data.regmap);

	i2c_set_clientdata(client, chip);

	/* Disable chip interrupts before requesting irq */
	err = regmap_raw_write(chip->data.regmap, TCPC_ALERT_MASK, &val,
			       sizeof(u16));
	if (err < 0)
		return err;

	chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
	if (IS_ERR(chip->tcpci))
		return PTR_ERR(chip->tcpci);

	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					_tcpci_irq,
					IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
					dev_name(&client->dev), chip);
	if (err < 0) {
		tcpci_unregister_port(chip->tcpci);
		return err;
	}

	return 0;
}

static void tcpci_remove(struct i2c_client *client)
{
	struct tcpci_chip *chip = i2c_get_clientdata(client);
	int err;

	/* Disable chip interrupts before unregistering port */
	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
	if (err < 0)
		dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));

	tcpci_unregister_port(chip->tcpci);
}

static const struct i2c_device_id tcpci_id[] = {
	{ "tcpci", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tcpci_id);

#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
	{ .compatible = "nxp,ptn5110", },
	{ .compatible = "tcpci", },
	{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
#endif

static struct i2c_driver tcpci_i2c_driver = {
	.driver = {
		.name = "tcpci",
		.of_match_table = of_match_ptr(tcpci_of_match),
	},
	.probe = tcpci_probe,
	.remove = tcpci_remove,
	.id_table = tcpci_id,
};
module_i2c_driver(tcpci_i2c_driver);

MODULE_DESCRIPTION("USB Type-C Port Controller Interface driver");
MODULE_LICENSE("GPL");