1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * UCSI driver for Cypress CCGx Type-C controller
4 *
5 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6 * Author: Ajay Gupta <ajayg@nvidia.com>
7 *
8 * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9 */
10 #include <linux/acpi.h>
11 #include <linux/delay.h>
12 #include <linux/firmware.h>
13 #include <linux/i2c.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/usb/typec_dp.h>
20
21 #include <asm/unaligned.h>
22 #include "ucsi.h"
23
24 enum enum_fw_mode {
25 BOOT, /* bootloader */
26 FW1, /* FW partition-1 (contains secondary fw) */
27 FW2, /* FW partition-2 (contains primary fw) */
28 FW_INVALID,
29 };
30
31 #define CCGX_RAB_DEVICE_MODE 0x0000
32 #define CCGX_RAB_INTR_REG 0x0006
33 #define DEV_INT BIT(0)
34 #define PORT0_INT BIT(1)
35 #define PORT1_INT BIT(2)
36 #define UCSI_READ_INT BIT(7)
37 #define CCGX_RAB_JUMP_TO_BOOT 0x0007
38 #define TO_BOOT 'J'
39 #define TO_ALT_FW 'A'
40 #define CCGX_RAB_RESET_REQ 0x0008
41 #define RESET_SIG 'R'
42 #define CMD_RESET_I2C 0x0
43 #define CMD_RESET_DEV 0x1
44 #define CCGX_RAB_ENTER_FLASHING 0x000A
45 #define FLASH_ENTER_SIG 'P'
46 #define CCGX_RAB_VALIDATE_FW 0x000B
47 #define CCGX_RAB_FLASH_ROW_RW 0x000C
48 #define FLASH_SIG 'F'
49 #define FLASH_RD_CMD 0x0
50 #define FLASH_WR_CMD 0x1
51 #define FLASH_FWCT1_WR_CMD 0x2
52 #define FLASH_FWCT2_WR_CMD 0x3
53 #define FLASH_FWCT_SIG_WR_CMD 0x4
54 #define CCGX_RAB_READ_ALL_VER 0x0010
55 #define CCGX_RAB_READ_FW2_VER 0x0020
56 #define CCGX_RAB_UCSI_CONTROL 0x0039
57 #define CCGX_RAB_UCSI_CONTROL_START BIT(0)
58 #define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
59 #define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
60 #define REG_FLASH_RW_MEM 0x0200
61 #define DEV_REG_IDX CCGX_RAB_DEVICE_MODE
62 #define CCGX_RAB_PDPORT_ENABLE 0x002C
63 #define PDPORT_1 BIT(0)
64 #define PDPORT_2 BIT(1)
65 #define CCGX_RAB_RESPONSE 0x007E
66 #define ASYNC_EVENT BIT(7)
67
68 /* CCGx events & async msg codes */
69 #define RESET_COMPLETE 0x80
70 #define EVENT_INDEX RESET_COMPLETE
71 #define PORT_CONNECT_DET 0x84
72 #define PORT_DISCONNECT_DET 0x85
73 #define ROLE_SWAP_COMPLETE 0x87
74
75 /* ccg firmware */
76 #define CYACD_LINE_SIZE 527
77 #define CCG4_ROW_SIZE 256
78 #define FW1_METADATA_ROW 0x1FF
79 #define FW2_METADATA_ROW 0x1FE
80 #define FW_CFG_TABLE_SIG_SIZE 256
81
82 static int secondary_fw_min_ver = 41;
83
84 enum enum_flash_mode {
85 SECONDARY_BL, /* update secondary using bootloader */
86 PRIMARY, /* update primary using secondary */
87 SECONDARY, /* update secondary using primary */
88 FLASH_NOT_NEEDED, /* update not required */
89 FLASH_INVALID,
90 };
91
92 static const char * const ccg_fw_names[] = {
93 "ccg_boot.cyacd",
94 "ccg_primary.cyacd",
95 "ccg_secondary.cyacd"
96 };
97
98 struct ccg_dev_info {
99 #define CCG_DEVINFO_FWMODE_SHIFT (0)
100 #define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
101 #define CCG_DEVINFO_PDPORTS_SHIFT (2)
102 #define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
103 u8 mode;
104 u8 bl_mode;
105 __le16 silicon_id;
106 __le16 bl_last_row;
107 } __packed;
108
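/*
 * Versions are compared as a packed 32-bit value: the 8-bit app version
 * (major/minor nibbles) goes in bits 31:24 via CCG_VERSION(), the patch
 * level in bits 23:16 via CCG_VERSION_PATCH(), and the 16-bit build number
 * in bits 15:0 (see ccg_check_fw_version()).
 */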
109 struct version_format {
110 __le16 build;
111 u8 patch;
112 u8 ver;
113 #define CCG_VERSION_PATCH(x) ((x) << 16)
114 #define CCG_VERSION(x) ((x) << 24)
115 #define CCG_VERSION_MIN_SHIFT (0)
116 #define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
117 #define CCG_VERSION_MAJ_SHIFT (4)
118 #define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
119 } __packed;
120
121 /*
122 * Firmware versions 3.1.10 and earlier built for NVIDIA have a known issue
123 * of missing the interrupt when a device is connected during runtime resume.
124 */
125 #define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v')
126 #define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
127
128 /* Firmware built for NVIDIA Tegra doesn't support the UCSI ALT command
129 * and has a known issue of reporting wrong capability info.
130 */
131 #define CCG_FW_BUILD_NVIDIA_TEGRA (('g' << 8) | 'n')
132
133 /* Altmode offset for NVIDIA Function Test Board (FTB) */
134 #define NVIDIA_FTB_DP_OFFSET (2)
135 #define NVIDIA_FTB_DBG_OFFSET (3)
136
137 struct version_info {
138 struct version_format base;
139 struct version_format app;
140 };
141
142 struct fw_config_table {
143 u32 identity;
144 u16 table_size;
145 u8 fwct_version;
146 u8 is_key_change;
147 u8 guid[16];
148 struct version_format base;
149 struct version_format app;
150 u8 primary_fw_digest[32];
151 u32 key_exp_length;
152 u8 key_modulus[256];
153 u8 key_exp[4];
154 };
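/*
 * A signed firmware image carries this config table followed by a
 * FW_CFG_TABLE_SIG_SIZE byte signature at the very end of the .cyacd file;
 * ccg_check_fw_version() and do_flash() look for the "FWCT" identity there
 * to distinguish signed from unsigned images.
 */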
155
156 /* CCGx response codes */
157 enum ccg_resp_code {
158 CMD_NO_RESP = 0x00,
159 CMD_SUCCESS = 0x02,
160 FLASH_DATA_AVAILABLE = 0x03,
161 CMD_INVALID = 0x05,
162 FLASH_UPDATE_FAIL = 0x07,
163 INVALID_FW = 0x08,
164 INVALID_ARG = 0x09,
165 CMD_NOT_SUPPORT = 0x0A,
166 TRANSACTION_FAIL = 0x0C,
167 PD_CMD_FAIL = 0x0D,
168 UNDEF_ERROR = 0x0F,
169 INVALID_RESP = 0x10,
170 };
171
172 #define CCG_EVENT_MAX (EVENT_INDEX + 43)
173
174 struct ccg_cmd {
175 u16 reg;
176 u32 data;
177 int len;
178 u32 delay; /* ms delay for cmd timeout */
179 };
180
181 struct ccg_resp {
182 u8 code;
183 u8 length;
184 };
185
186 struct ucsi_ccg_altmode {
187 u16 svid;
188 u32 mid;
189 u8 linked_idx;
190 u8 active_idx;
191 #define UCSI_MULTI_DP_INDEX (0xff)
192 bool checked;
193 } __packed;
194
195 struct ucsi_ccg {
196 struct device *dev;
197 struct ucsi *ucsi;
198 struct i2c_client *client;
199
200 struct ccg_dev_info info;
201 /* version info for boot, primary and secondary */
202 struct version_info version[FW2 + 1];
203 u32 fw_version;
204 /* CCG HPI communication flags */
205 unsigned long flags;
206 #define RESET_PENDING 0
207 #define DEV_CMD_PENDING 1
208 struct ccg_resp dev_resp;
209 u8 cmd_resp;
210 int port_num;
211 int irq;
212 struct work_struct work;
213 struct mutex lock; /* to sync between user and driver thread */
214
215 /* fw build with vendor information */
216 u16 fw_build;
217 struct work_struct pm_work;
218
219 struct completion complete;
220
221 u64 last_cmd_sent;
222 bool has_multiple_dp;
223 struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
224 struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
225 };
226
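/*
 * CCGx HPI transfers below address a 16-bit register (RAB): a read is an
 * I2C write of the little-endian register address followed by an I2C read
 * of the data, chunked to the adapter's max_read_len; a write sends the
 * address and payload in a single message. Typical use, as in the
 * interrupt handler below:
 *
 *	u8 intr_reg;
 *
 *	ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
 */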
227 static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
228 {
229 struct i2c_client *client = uc->client;
230 const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
231 unsigned char buf[2];
232 struct i2c_msg msgs[] = {
233 {
234 .addr = client->addr,
235 .flags = 0x0,
236 .len = sizeof(buf),
237 .buf = buf,
238 },
239 {
240 .addr = client->addr,
241 .flags = I2C_M_RD,
242 .buf = data,
243 },
244 };
245 u32 rlen, rem_len = len, max_read_len = len;
246 int status;
247
248 /* check any max_read_len limitation on i2c adapter */
249 if (quirks && quirks->max_read_len)
250 max_read_len = quirks->max_read_len;
251
252 pm_runtime_get_sync(uc->dev);
253 while (rem_len > 0) {
254 msgs[1].buf = &data[len - rem_len];
255 rlen = min_t(u16, rem_len, max_read_len);
256 msgs[1].len = rlen;
257 put_unaligned_le16(rab, buf);
258 status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
259 if (status < 0) {
260 dev_err(uc->dev, "i2c_transfer failed %d\n", status);
261 pm_runtime_put_sync(uc->dev);
262 return status;
263 }
264 rab += rlen;
265 rem_len -= rlen;
266 }
267
268 pm_runtime_put_sync(uc->dev);
269 return 0;
270 }
271
272 static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
273 {
274 struct i2c_client *client = uc->client;
275 unsigned char *buf;
276 struct i2c_msg msgs[] = {
277 {
278 .addr = client->addr,
279 .flags = 0x0,
280 }
281 };
282 int status;
283
284 buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
285 if (!buf)
286 return -ENOMEM;
287
288 put_unaligned_le16(rab, buf);
289 memcpy(buf + sizeof(rab), data, len);
290
291 msgs[0].len = len + sizeof(rab);
292 msgs[0].buf = buf;
293
294 pm_runtime_get_sync(uc->dev);
295 status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
296 if (status < 0) {
297 dev_err(uc->dev, "i2c_transfer failed %d\n", status);
298 pm_runtime_put_sync(uc->dev);
299 kfree(buf);
300 return status;
301 }
302
303 pm_runtime_put_sync(uc->dev);
304 kfree(buf);
305 return 0;
306 }
307
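/*
 * Reset the UCSI interface on the CCGx: stop and restart it through
 * CCGX_RAB_UCSI_CONTROL, then drain any pending responses by acking
 * DEV_INT, retrying for up to about 100 ms.
 */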
308 static int ucsi_ccg_init(struct ucsi_ccg *uc)
309 {
310 unsigned int count = 10;
311 u8 data;
312 int status;
313
314 data = CCGX_RAB_UCSI_CONTROL_STOP;
315 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
316 if (status < 0)
317 return status;
318
319 data = CCGX_RAB_UCSI_CONTROL_START;
320 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
321 if (status < 0)
322 return status;
323
324 /*
325 * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI control
326 * register writes above push responses which must be cleared.
327 */
328 do {
329 status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
330 if (status < 0)
331 return status;
332
333 if (!(data & DEV_INT))
334 return 0;
335
336 status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
337 if (status < 0)
338 return status;
339
340 usleep_range(10000, 11000);
341 } while (--count);
342
343 return -ETIMEDOUT;
344 }
345
346 static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
347 {
348 u8 cam, new_cam;
349
350 cam = data[0];
351 new_cam = uc->orig[cam].linked_idx;
352 uc->updated[new_cam].active_idx = cam;
353 data[0] = new_cam;
354 }
355
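/*
 * The PPM may report several DP altmodes for the same SVID, one per
 * pin-assignment VDO. Collapse such duplicates into a single altmode whose
 * MID is the union of the individual MIDs, and remember the index mapping
 * in uc->orig / uc->updated so that GET_CURRENT_CAM and SET_NEW_CAM indices
 * can be translated between the UCSI view and the CCGx view.
 */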
356 static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
357 struct ucsi_altmode *orig,
358 struct ucsi_altmode *updated)
359 {
360 struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
361 struct ucsi_ccg_altmode *alt, *new_alt;
362 int i, j, k = 0;
363 bool found = false;
364
365 alt = uc->orig;
366 new_alt = uc->updated;
367 memset(uc->updated, 0, sizeof(uc->updated));
368
369 /*
370 * Copy original connector altmodes to new structure.
371 * We need this before the second loop since that loop
372 * checks for duplicate altmodes.
373 */
374 for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
375 alt[i].svid = orig[i].svid;
376 alt[i].mid = orig[i].mid;
377 if (!alt[i].svid)
378 break;
379 }
380
381 for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
382 if (!alt[i].svid)
383 break;
384
385 /* already checked and considered */
386 if (alt[i].checked)
387 continue;
388
389 if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
390 /* Found Non DP altmode */
391 new_alt[k].svid = alt[i].svid;
392 new_alt[k].mid |= alt[i].mid;
393 new_alt[k].linked_idx = i;
394 alt[i].linked_idx = k;
395 updated[k].svid = new_alt[k].svid;
396 updated[k].mid = new_alt[k].mid;
397 k++;
398 continue;
399 }
400
401 for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
402 if (alt[i].svid != alt[j].svid ||
403 !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
404 continue;
405 } else {
406 /* Found duplicate DP mode */
407 new_alt[k].svid = alt[i].svid;
408 new_alt[k].mid |= alt[i].mid | alt[j].mid;
409 new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
410 alt[i].linked_idx = k;
411 alt[j].linked_idx = k;
412 alt[j].checked = true;
413 found = true;
414 }
415 }
416 if (found) {
417 uc->has_multiple_dp = true;
418 } else {
419 /* Didn't find any duplicate DP altmode */
420 new_alt[k].svid = alt[i].svid;
421 new_alt[k].mid |= alt[i].mid;
422 new_alt[k].linked_idx = i;
423 alt[i].linked_idx = k;
424 }
425 updated[k].svid = new_alt[k].svid;
426 updated[k].mid = new_alt[k].mid;
427 k++;
428 }
429 return found;
430 }
431
432 static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
433 struct ucsi_connector *con,
434 u64 *cmd)
435 {
436 struct ucsi_ccg_altmode *new_port, *port;
437 struct typec_altmode *alt = NULL;
438 u8 new_cam, cam, pin;
439 bool enter_new_mode;
440 int i, j, k = 0xff;
441
442 port = uc->orig;
443 new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
444 if (new_cam >= ARRAY_SIZE(uc->updated))
445 return;
446 new_port = &uc->updated[new_cam];
447 cam = new_port->linked_idx;
448 enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
449
450 /*
451 * If CAM is UCSI_MULTI_DP_INDEX then this is a DP altmode
452 * with multiple DP modes. Find the CAM with the best pin assignment
453 * among all DP modes. Prioritize pin E->D->C after making sure
454 * the partner supports that pin.
455 */
456 if (cam == UCSI_MULTI_DP_INDEX) {
457 if (enter_new_mode) {
458 for (i = 0; con->partner_altmode[i]; i++) {
459 alt = con->partner_altmode[i];
460 if (alt->svid == new_port->svid)
461 break;
462 }
463 /*
464 * alt will always be non-NULL since this is a
465 * UCSI_SET_NEW_CAM command and so there will be
466 * at least one con->partner_altmode[i] with an svid
467 * matching new_port->svid.
468 */
469 for (j = 0; port[j].svid; j++) {
470 pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
471 if (alt && port[j].svid == alt->svid &&
472 (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
473 /* prioritize pin E->D->C */
474 if (k == 0xff || (k != 0xff && pin >
475 DP_CONF_GET_PIN_ASSIGN(port[k].mid))
476 ) {
477 k = j;
478 }
479 }
480 }
481 cam = k;
482 new_port->active_idx = cam;
483 } else {
484 cam = new_port->active_idx;
485 }
486 }
487 *cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
488 *cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
489 }
490
491 /*
492 * Change the order of vdo values of the NVIDIA test device FTB
493 * (Function Test Board), which reports its altmode list with vdo=0x3
494 * first and then vdo=0x1. The current logic assigns mode values based
495 * on the order in the altmode list, which causes a mismatch of CON
496 * and SOP altmodes since the NVIDIA GPU connector has vdo=0x1
497 * first and then vdo=0x3.
498 */
499 static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
500 struct ucsi_altmode *alt)
501 {
502 switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
503 case NVIDIA_FTB_DP_OFFSET:
504 if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
505 alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
506 DP_CAP_DP_SIGNALING | DP_CAP_USB |
507 DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
508 break;
509 case NVIDIA_FTB_DBG_OFFSET:
510 if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
511 alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
512 break;
513 default:
514 break;
515 }
516 }
517
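/*
 * UCSI data structure reads go through the CCGX_RAB_UCSI_DATA_BLOCK window.
 * MESSAGE_IN data is post-processed based on the last command sent, to hide
 * CCGx quirks: merged DP altmode indices, the NVIDIA FTB vdo ordering, and
 * the wrongly reported alt mode capability on Tegra firmware.
 */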
518 static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
519 void *val, size_t val_len)
520 {
521 struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
522 u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
523 struct ucsi_capability *cap;
524 struct ucsi_altmode *alt;
525 int ret;
526
527 ret = ccg_read(uc, reg, val, val_len);
528 if (ret)
529 return ret;
530
531 if (offset != UCSI_MESSAGE_IN)
532 return ret;
533
534 switch (UCSI_COMMAND(uc->last_cmd_sent)) {
535 case UCSI_GET_CURRENT_CAM:
536 if (uc->has_multiple_dp)
537 ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
538 break;
539 case UCSI_GET_ALTERNATE_MODES:
540 if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
541 UCSI_RECIPIENT_SOP) {
542 alt = val;
543 if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
544 ucsi_ccg_nvidia_altmode(uc, alt);
545 }
546 break;
547 case UCSI_GET_CAPABILITY:
548 if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
549 cap = val;
550 cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
551 }
552 break;
553 default:
554 break;
555 }
556 uc->last_cmd_sent = 0;
557
558 return ret;
559 }
560
561 static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
562 const void *val, size_t val_len)
563 {
564 u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
565
566 return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
567 }
568
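/*
 * Synchronous writes record the command so that the read path can
 * post-process its reply, translate SET_NEW_CAM indices when DP altmodes
 * were merged, and then wait up to 5 seconds for the completion signalled
 * from the interrupt handler.
 */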
569 static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
570 const void *val, size_t val_len)
571 {
572 struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
573 struct ucsi_connector *con;
574 int con_index;
575 int ret;
576
577 mutex_lock(&uc->lock);
578 pm_runtime_get_sync(uc->dev);
579 set_bit(DEV_CMD_PENDING, &uc->flags);
580
581 if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
582 uc->last_cmd_sent = *(u64 *)val;
583
584 if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
585 uc->has_multiple_dp) {
586 con_index = (uc->last_cmd_sent >> 16) &
587 UCSI_CMD_CONNECTOR_MASK;
588 if (con_index == 0) {
589 ret = -EINVAL;
590 goto err_put;
591 }
592 con = &uc->ucsi->connector[con_index - 1];
593 ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
594 }
595 }
596
597 ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
598 if (ret)
599 goto err_clear_bit;
600
601 if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
602 ret = -ETIMEDOUT;
603
604 err_clear_bit:
605 clear_bit(DEV_CMD_PENDING, &uc->flags);
606 err_put:
607 pm_runtime_put_sync(uc->dev);
608 mutex_unlock(&uc->lock);
609
610 return ret;
611 }
612
613 static const struct ucsi_operations ucsi_ccg_ops = {
614 .read = ucsi_ccg_read,
615 .sync_write = ucsi_ccg_sync_write,
616 .async_write = ucsi_ccg_async_write,
617 .update_altmodes = ucsi_ccg_update_altmodes
618 };
619
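/*
 * Threaded interrupt handler: read and later ack CCGX_RAB_INTR_REG, fetch
 * the CCI from the UCSI data window, forward connector change events and
 * complete any pending synchronous command.
 */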
620 static irqreturn_t ccg_irq_handler(int irq, void *data)
621 {
622 u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
623 struct ucsi_ccg *uc = data;
624 u8 intr_reg;
625 u32 cci;
626 int ret;
627
628 ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
629 if (ret)
630 return ret;
631
632 ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
633 if (ret)
634 goto err_clear_irq;
635
636 if (UCSI_CCI_CONNECTOR(cci))
637 ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
638
639 if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
640 cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
641 complete(&uc->complete);
642
643 err_clear_irq:
644 ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
645
646 return IRQ_HANDLED;
647 }
648
649 static int ccg_request_irq(struct ucsi_ccg *uc)
650 {
651 unsigned long flags = IRQF_ONESHOT;
652
653 if (!dev_fwnode(uc->dev))
654 flags |= IRQF_TRIGGER_HIGH;
655
656 return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
657 }
658
659 static void ccg_pm_workaround_work(struct work_struct *pm_work)
660 {
661 ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
662 }
663
664 static int get_fw_info(struct ucsi_ccg *uc)
665 {
666 int err;
667
668 err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
669 sizeof(uc->version));
670 if (err < 0)
671 return err;
672
673 uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
674 CCG_VERSION_PATCH(uc->version[FW2].app.patch);
675
676 err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
677 sizeof(uc->info));
678 if (err < 0)
679 return err;
680
681 return 0;
682 }
683
684 static inline bool invalid_async_evt(int code)
685 {
686 return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
687 }
688
689 static void ccg_process_response(struct ucsi_ccg *uc)
690 {
691 struct device *dev = uc->dev;
692
693 if (uc->dev_resp.code & ASYNC_EVENT) {
694 if (uc->dev_resp.code == RESET_COMPLETE) {
695 if (test_bit(RESET_PENDING, &uc->flags))
696 uc->cmd_resp = uc->dev_resp.code;
697 get_fw_info(uc);
698 }
699 if (invalid_async_evt(uc->dev_resp.code))
700 dev_err(dev, "invalid async evt %d\n",
701 uc->dev_resp.code);
702 } else {
703 if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
704 uc->cmd_resp = uc->dev_resp.code;
705 clear_bit(DEV_CMD_PENDING, &uc->flags);
706 } else {
707 dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
708 uc->dev_resp.code);
709 }
710 }
711 }
712
713 static int ccg_read_response(struct ucsi_ccg *uc)
714 {
715 unsigned long target = jiffies + msecs_to_jiffies(1000);
716 struct device *dev = uc->dev;
717 u8 intval;
718 int status;
719
720 /* wait for interrupt status to get updated */
721 do {
722 status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
723 sizeof(intval));
724 if (status < 0)
725 return status;
726
727 if (intval & DEV_INT)
728 break;
729 usleep_range(500, 600);
730 } while (time_is_after_jiffies(target));
731
732 if (time_is_before_jiffies(target)) {
733 dev_err(dev, "response timeout error\n");
734 return -ETIME;
735 }
736
737 status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
738 sizeof(uc->dev_resp));
739 if (status < 0)
740 return status;
741
742 status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
743 if (status < 0)
744 return status;
745
746 return 0;
747 }
748
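/*
 * HPI command flow: write the command payload to its register, wait the
 * per-command delay, poll CCGX_RAB_INTR_REG for DEV_INT, then read
 * CCGX_RAB_RESPONSE and ack the interrupt (ccg_read_response()), and
 * finally decode the response code (ccg_process_response()).
 */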
749 /* Caller must hold uc->lock */
750 static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
751 {
752 struct device *dev = uc->dev;
753 int ret;
754
755 switch (cmd->reg & 0xF000) {
756 case DEV_REG_IDX:
757 set_bit(DEV_CMD_PENDING, &uc->flags);
758 break;
759 default:
760 dev_err(dev, "invalid cmd register\n");
761 break;
762 }
763
764 ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
765 if (ret < 0)
766 return ret;
767
768 msleep(cmd->delay);
769
770 ret = ccg_read_response(uc);
771 if (ret < 0) {
772 dev_err(dev, "response read error\n");
773 switch (cmd->reg & 0xF000) {
774 case DEV_REG_IDX:
775 clear_bit(DEV_CMD_PENDING, &uc->flags);
776 break;
777 default:
778 dev_err(dev, "invalid cmd register\n");
779 break;
780 }
781 return -EIO;
782 }
783 ccg_process_response(uc);
784
785 return uc->cmd_resp;
786 }
787
788 static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
789 {
790 struct ccg_cmd cmd;
791 int ret;
792
793 cmd.reg = CCGX_RAB_ENTER_FLASHING;
794 cmd.data = FLASH_ENTER_SIG;
795 cmd.len = 1;
796 cmd.delay = 50;
797
798 mutex_lock(&uc->lock);
799
800 ret = ccg_send_command(uc, &cmd);
801
802 mutex_unlock(&uc->lock);
803
804 if (ret != CMD_SUCCESS) {
805 dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
806 return ret;
807 }
808
809 return 0;
810 }
811
812 static int ccg_cmd_reset(struct ucsi_ccg *uc)
813 {
814 struct ccg_cmd cmd;
815 u8 *p;
816 int ret;
817
818 p = (u8 *)&cmd.data;
819 cmd.reg = CCGX_RAB_RESET_REQ;
820 p[0] = RESET_SIG;
821 p[1] = CMD_RESET_DEV;
822 cmd.len = 2;
823 cmd.delay = 5000;
824
825 mutex_lock(&uc->lock);
826
827 set_bit(RESET_PENDING, &uc->flags);
828
829 ret = ccg_send_command(uc, &cmd);
830 if (ret != RESET_COMPLETE)
831 goto err_clear_flag;
832
833 ret = 0;
834
835 err_clear_flag:
836 clear_bit(RESET_PENDING, &uc->flags);
837
838 mutex_unlock(&uc->lock);
839
840 return ret;
841 }
842
843 static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
844 {
845 struct ccg_cmd cmd;
846 int ret;
847
848 cmd.reg = CCGX_RAB_PDPORT_ENABLE;
849 if (enable)
850 cmd.data = (uc->port_num == 1) ?
851 PDPORT_1 : (PDPORT_1 | PDPORT_2);
852 else
853 cmd.data = 0x0;
854 cmd.len = 1;
855 cmd.delay = 10;
856
857 mutex_lock(&uc->lock);
858
859 ret = ccg_send_command(uc, &cmd);
860
861 mutex_unlock(&uc->lock);
862
863 if (ret != CMD_SUCCESS) {
864 dev_err(uc->dev, "port control failed ret=%d\n", ret);
865 return ret;
866 }
867 return 0;
868 }
869
870 static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
871 {
872 struct ccg_cmd cmd;
873 int ret;
874
875 cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
876
877 if (bl_mode)
878 cmd.data = TO_BOOT;
879 else
880 cmd.data = TO_ALT_FW;
881
882 cmd.len = 1;
883 cmd.delay = 100;
884
885 mutex_lock(&uc->lock);
886
887 set_bit(RESET_PENDING, &uc->flags);
888
889 ret = ccg_send_command(uc, &cmd);
890 if (ret != RESET_COMPLETE)
891 goto err_clear_flag;
892
893 ret = 0;
894
895 err_clear_flag:
896 clear_bit(RESET_PENDING, &uc->flags);
897
898 mutex_unlock(&uc->lock);
899
900 return ret;
901 }
902
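/*
 * Flashing a row is a two step sequence: copy one CCG4_ROW_SIZE row into
 * the flash read/write memory at REG_FLASH_RW_MEM, then issue FLASH_SIG
 * plus the flash command and row number through CCGX_RAB_FLASH_ROW_RW.
 */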
903 static int
904 ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
905 const void *data, u8 fcmd)
906 {
907 struct i2c_client *client = uc->client;
908 struct ccg_cmd cmd;
909 u8 buf[CCG4_ROW_SIZE + 2];
910 u8 *p;
911 int ret;
912
913 /* Copy the data into the flash read/write memory. */
914 put_unaligned_le16(REG_FLASH_RW_MEM, buf);
915
916 memcpy(buf + 2, data, CCG4_ROW_SIZE);
917
918 mutex_lock(&uc->lock);
919
920 ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
921 if (ret != CCG4_ROW_SIZE + 2) {
922 dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
923 mutex_unlock(&uc->lock);
924 return ret < 0 ? ret : -EIO;
925 }
926
927 /* Use the FLASH_ROW_READ_WRITE register to trigger writing of
928 * the data to the desired flash row. */
929 p = (u8 *)&cmd.data;
930 cmd.reg = CCGX_RAB_FLASH_ROW_RW;
931 p[0] = FLASH_SIG;
932 p[1] = fcmd;
933 put_unaligned_le16(row, &p[2]);
934 cmd.len = 4;
935 cmd.delay = 50;
936 if (fcmd == FLASH_FWCT_SIG_WR_CMD)
937 cmd.delay += 400;
938 if (row == 510)
939 cmd.delay += 220;
940 ret = ccg_send_command(uc, &cmd);
941
942 mutex_unlock(&uc->lock);
943
944 if (ret != CMD_SUCCESS) {
945 dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
946 return ret;
947 }
948
949 return 0;
950 }
951
952 static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
953 {
954 struct ccg_cmd cmd;
955 int ret;
956
957 cmd.reg = CCGX_RAB_VALIDATE_FW;
958 cmd.data = fwid;
959 cmd.len = 1;
960 cmd.delay = 500;
961
962 mutex_lock(&uc->lock);
963
964 ret = ccg_send_command(uc, &cmd);
965
966 mutex_unlock(&uc->lock);
967
968 if (ret != CMD_SUCCESS)
969 return ret;
970
971 return 0;
972 }
973
974 static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
975 struct version_format *app,
976 struct fw_config_table *fw_cfg)
977 {
978 struct device *dev = uc->dev;
979
980 /* Check if the fw build is for supported vendors */
981 if (le16_to_cpu(app->build) != uc->fw_build) {
982 dev_info(dev, "current fw is not from supported vendor\n");
983 return false;
984 }
985
986 /* Check if the new fw build is for supported vendors */
987 if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
988 dev_info(dev, "new fw is not from supported vendor\n");
989 return false;
990 }
991 return true;
992 }
993
994 static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
995 struct version_format *app)
996 {
997 const struct firmware *fw = NULL;
998 struct device *dev = uc->dev;
999 struct fw_config_table fw_cfg;
1000 u32 cur_version, new_version;
1001 bool is_later = false;
1002
1003 if (request_firmware(&fw, fw_name, dev) != 0) {
1004 dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
1005 return false;
1006 }
1007
1008 /*
1009 * Check if this is a signed fw image:
1010 * the last part of the fw image is the fw config table and its signature.
1011 */
1012 if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
1013 goto out_release_firmware;
1014
1015 memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1016 sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));
1017
1018 if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
1019 dev_info(dev, "not a signed image\n");
1020 goto out_release_firmware;
1021 }
1022
1023 /* compare input version with FWCT version */
1024 cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
1025 CCG_VERSION(app->ver);
1026
1027 new_version = le16_to_cpu(fw_cfg.app.build) |
1028 CCG_VERSION_PATCH(fw_cfg.app.patch) |
1029 CCG_VERSION(fw_cfg.app.ver);
1030
1031 if (!ccg_check_vendor_version(uc, app, &fw_cfg))
1032 goto out_release_firmware;
1033
1034 if (new_version > cur_version)
1035 is_later = true;
1036
1037 out_release_firmware:
1038 release_firmware(fw);
1039 return is_later;
1040 }
1041
1042 static int ccg_fw_update_needed(struct ucsi_ccg *uc,
1043 enum enum_flash_mode *mode)
1044 {
1045 struct device *dev = uc->dev;
1046 int err;
1047 struct version_info version[3];
1048
1049 err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
1050 sizeof(uc->info));
1051 if (err) {
1052 dev_err(dev, "read device mode failed\n");
1053 return err;
1054 }
1055
1056 err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
1057 sizeof(version));
1058 if (err) {
1059 dev_err(dev, "read firmware version failed\n");
1060 return err;
1061 }
1062
1063 if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
1064 sizeof(struct version_info)) == 0) {
1065 dev_info(dev, "secondary fw is not flashed\n");
1066 *mode = SECONDARY_BL;
1067 } else if (le16_to_cpu(version[FW1].base.build) <
1068 secondary_fw_min_ver) {
1069 dev_info(dev, "secondary fw version is too low (< %d)\n",
1070 secondary_fw_min_ver);
1071 *mode = SECONDARY;
1072 } else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
1073 sizeof(struct version_info)) == 0) {
1074 dev_info(dev, "primary fw is not flashed\n");
1075 *mode = PRIMARY;
1076 } else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
1077 &version[FW2].app)) {
1078 dev_info(dev, "found primary fw with later version\n");
1079 *mode = PRIMARY;
1080 } else {
1081 dev_info(dev, "secondary and primary fw are the latest\n");
1082 *mode = FLASH_NOT_NEEDED;
1083 }
1084 return 0;
1085 }
1086
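/*
 * Flash one firmware image: if the device is currently running the primary
 * firmware (FW2), disable the PD ports and jump to the alternate image
 * first; for signed images write the FWCT and its signature; then enter
 * flashing mode, program every .cyacd row, validate the new image, reset
 * the device and re-enable the PD ports.
 */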
1087 static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
1088 {
1089 struct device *dev = uc->dev;
1090 const struct firmware *fw = NULL;
1091 const char *p, *s;
1092 const char *eof;
1093 int err, row, len, line_sz, line_cnt = 0;
1094 unsigned long start_time = jiffies;
1095 struct fw_config_table fw_cfg;
1096 u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
1097 u8 *wr_buf;
1098
1099 err = request_firmware(&fw, ccg_fw_names[mode], dev);
1100 if (err) {
1101 dev_err(dev, "request %s failed err=%d\n",
1102 ccg_fw_names[mode], err);
1103 return err;
1104 }
1105
1106 if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
1107 CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
1108 err = ccg_cmd_port_control(uc, false);
1109 if (err < 0)
1110 goto release_fw;
1111 err = ccg_cmd_jump_boot_mode(uc, 0);
1112 if (err < 0)
1113 goto release_fw;
1114 }
1115
1116 eof = fw->data + fw->size;
1117
1118 /*
1119 * Check if this is a signed fw image:
1120 * the last part of the fw image is the fw config table and its signature.
1121 */
1122 if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
1123 goto not_signed_fw;
1124
1125 memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1126 sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));
1127
1128 if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
1129 dev_info(dev, "not a signed image\n");
1130 goto not_signed_fw;
1131 }
1132 eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);
1133
1134 memcpy((uint8_t *)&fw_cfg_sig,
1135 fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));
1136
1137 /* flash fw config table and signature first */
1138 err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
1139 FLASH_FWCT1_WR_CMD);
1140 if (err)
1141 goto release_fw;
1142
1143 err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
1144 FLASH_FWCT2_WR_CMD);
1145 if (err)
1146 goto release_fw;
1147
1148 err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
1149 FLASH_FWCT_SIG_WR_CMD);
1150 if (err)
1151 goto release_fw;
1152
1153 not_signed_fw:
1154 wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
1155 if (!wr_buf) {
1156 err = -ENOMEM;
1157 goto release_fw;
1158 }
1159
1160 err = ccg_cmd_enter_flashing(uc);
1161 if (err)
1162 goto release_mem;
1163
1164 /*****************************************************************
1165 * CCG firmware image (.cyacd) file line format
1166 *
1167 * :00rrrrllll[dd....]cc\r\n
1168 *
1169 * :00 header (3 char)
1170 * rrrr is the row number to flash (4 char)
1171 * llll is the data length to flash (4 char)
1172 * dd is a data field, each dd representing one byte of data (512 char)
1173 * cc is the checksum (2 char)
1174 * \r\n newline (2 char)
1175 *
1176 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
1177 *
1178 *****************************************************************/
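/*
 * Illustrative example line (not taken from a real image), with the 512
 * data characters elided:
 *
 *	:0001f40100<512 hex data chars>a7\r\n
 *
 * i.e. flash row 0x01f4 with 0x0100 (CCG4_ROW_SIZE) bytes of data and
 * checksum 0xa7.
 */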
1179
1180 p = strnchr(fw->data, fw->size, ':');
1181 while (p < eof) {
1182 s = strnchr(p + 1, eof - p - 1, ':');
1183
1184 if (!s)
1185 s = eof;
1186
1187 line_sz = s - p;
1188
1189 if (line_sz != CYACD_LINE_SIZE) {
1190 dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
1191 err = -EINVAL;
1192 goto release_mem;
1193 }
1194
1195 if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
1196 err = -EINVAL;
1197 goto release_mem;
1198 }
1199
1200 row = get_unaligned_be16(wr_buf);
1201 len = get_unaligned_be16(&wr_buf[2]);
1202
1203 if (len != CCG4_ROW_SIZE) {
1204 err = -EINVAL;
1205 goto release_mem;
1206 }
1207
1208 err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
1209 FLASH_WR_CMD);
1210 if (err)
1211 goto release_mem;
1212
1213 line_cnt++;
1214 p = s;
1215 }
1216
1217 dev_info(dev, "total %d row flashed. time: %dms\n",
1218 line_cnt, jiffies_to_msecs(jiffies - start_time));
1219
1220 err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 : FW1);
1221 if (err)
1222 dev_err(dev, "%s validation failed err=%d\n",
1223 (mode == PRIMARY) ? "FW2" : "FW1", err);
1224 else
1225 dev_info(dev, "%s validated\n",
1226 (mode == PRIMARY) ? "FW2" : "FW1");
1227
1228 err = ccg_cmd_port_control(uc, false);
1229 if (err < 0)
1230 goto release_mem;
1231
1232 err = ccg_cmd_reset(uc);
1233 if (err < 0)
1234 goto release_mem;
1235
1236 err = ccg_cmd_port_control(uc, true);
1237 if (err < 0)
1238 goto release_mem;
1239
1240 release_mem:
1241 kfree(wr_buf);
1242
1243 release_fw:
1244 release_firmware(fw);
1245 return err;
1246 }
1247
1248 /*******************************************************************************
1249 * CCG4 has two copies of the firmware in addition to the bootloader.
1250 * If the device is running FW1, FW2 can be updated with the new version.
1251 * Dual firmware mode allows the CCG device to stay in a PD contract and support
1252 * USB PD and Type-C functionality while a firmware update is in progress.
1253 ******************************************************************************/
1254 static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1255 {
1256 int err = 0;
1257
1258 while (flash_mode != FLASH_NOT_NEEDED) {
1259 err = do_flash(uc, flash_mode);
1260 if (err < 0)
1261 return err;
1262 err = ccg_fw_update_needed(uc, &flash_mode);
1263 if (err < 0)
1264 return err;
1265 }
1266 dev_info(uc->dev, "CCG FW update successful\n");
1267
1268 return err;
1269 }
1270
1271 static int ccg_restart(struct ucsi_ccg *uc)
1272 {
1273 struct device *dev = uc->dev;
1274 int status;
1275
1276 status = ucsi_ccg_init(uc);
1277 if (status < 0) {
1278 dev_err(dev, "ucsi_ccg_init failed, err=%d\n", status);
1279 return status;
1280 }
1281
1282 status = ccg_request_irq(uc);
1283 if (status < 0) {
1284 dev_err(dev, "request_threaded_irq failed - %d\n", status);
1285 return status;
1286 }
1287
1288 status = ucsi_register(uc->ucsi);
1289 if (status) {
1290 dev_err(uc->dev, "failed to register the interface\n");
1291 return status;
1292 }
1293
1294 pm_runtime_enable(uc->dev);
1295 return 0;
1296 }
1297
1298 static void ccg_update_firmware(struct work_struct *work)
1299 {
1300 struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
1301 enum enum_flash_mode flash_mode;
1302 int status;
1303
1304 status = ccg_fw_update_needed(uc, &flash_mode);
1305 if (status < 0)
1306 return;
1307
1308 if (flash_mode != FLASH_NOT_NEEDED) {
1309 ucsi_unregister(uc->ucsi);
1310 pm_runtime_disable(uc->dev);
1311 free_irq(uc->irq, uc);
1312
1313 ccg_fw_update(uc, flash_mode);
1314 ccg_restart(uc);
1315 }
1316 }
1317
1318 static ssize_t do_flash_store(struct device *dev,
1319 struct device_attribute *attr,
1320 const char *buf, size_t n)
1321 {
1322 struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1323 bool flash;
1324
1325 if (kstrtobool(buf, &flash))
1326 return -EINVAL;
1327
1328 if (!flash)
1329 return n;
1330
1331 if (uc->fw_build == 0x0) {
1332 dev_err(dev, "fail to flash FW due to missing FW build info\n");
1333 return -EINVAL;
1334 }
1335
1336 schedule_work(&uc->work);
1337 return n;
1338 }
1339
1340 static DEVICE_ATTR_WO(do_flash);
1341
1342 static struct attribute *ucsi_ccg_attrs[] = {
1343 &dev_attr_do_flash.attr,
1344 NULL,
1345 };
1346 ATTRIBUTE_GROUPS(ucsi_ccg);
1347
1348 static int ucsi_ccg_probe(struct i2c_client *client)
1349 {
1350 struct device *dev = &client->dev;
1351 struct ucsi_ccg *uc;
1352 const char *fw_name;
1353 int status;
1354
1355 uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
1356 if (!uc)
1357 return -ENOMEM;
1358
1359 uc->dev = dev;
1360 uc->client = client;
1361 uc->irq = client->irq;
1362 mutex_init(&uc->lock);
1363 init_completion(&uc->complete);
1364 INIT_WORK(&uc->work, ccg_update_firmware);
1365 INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
1366
1367 /* Only fail FW flashing when FW build information is not provided */
1368 status = device_property_read_string(dev, "firmware-name", &fw_name);
1369 if (!status) {
1370 if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
1371 uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
1372 else if (!strcmp(fw_name, "nvidia,gpu"))
1373 uc->fw_build = CCG_FW_BUILD_NVIDIA;
1374 if (!uc->fw_build)
1375 dev_err(uc->dev, "failed to get FW build information\n");
1376 }
1377
1378 /* reset ccg device and initialize ucsi */
1379 status = ucsi_ccg_init(uc);
1380 if (status < 0) {
1381 dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
1382 return status;
1383 }
1384
1385 status = get_fw_info(uc);
1386 if (status < 0) {
1387 dev_err(uc->dev, "get_fw_info failed - %d\n", status);
1388 return status;
1389 }
1390
1391 uc->port_num = 1;
1392
1393 if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
1394 uc->port_num++;
1395
1396 uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
1397 if (IS_ERR(uc->ucsi))
1398 return PTR_ERR(uc->ucsi);
1399
1400 ucsi_set_drvdata(uc->ucsi, uc);
1401
1402 status = ccg_request_irq(uc);
1403 if (status < 0) {
1404 dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
1405 goto out_ucsi_destroy;
1406 }
1407
1408 status = ucsi_register(uc->ucsi);
1409 if (status)
1410 goto out_free_irq;
1411
1412 i2c_set_clientdata(client, uc);
1413
1414 pm_runtime_set_active(uc->dev);
1415 pm_runtime_enable(uc->dev);
1416 pm_runtime_use_autosuspend(uc->dev);
1417 pm_runtime_set_autosuspend_delay(uc->dev, 5000);
1418 pm_runtime_idle(uc->dev);
1419
1420 return 0;
1421
1422 out_free_irq:
1423 free_irq(uc->irq, uc);
1424 out_ucsi_destroy:
1425 ucsi_destroy(uc->ucsi);
1426
1427 return status;
1428 }
1429
1430 static void ucsi_ccg_remove(struct i2c_client *client)
1431 {
1432 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1433
1434 cancel_work_sync(&uc->pm_work);
1435 cancel_work_sync(&uc->work);
1436 pm_runtime_disable(uc->dev);
1437 ucsi_unregister(uc->ucsi);
1438 ucsi_destroy(uc->ucsi);
1439 free_irq(uc->irq, uc);
1440 }
1441
1442 static const struct of_device_id ucsi_ccg_of_match_table[] = {
1443 { .compatible = "cypress,cypd4226", },
1444 { /* sentinel */ }
1445 };
1446 MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);
1447
1448 static const struct i2c_device_id ucsi_ccg_device_id[] = {
1449 {"ccgx-ucsi", 0},
1450 {}
1451 };
1452 MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
1453
1454 static const struct acpi_device_id amd_i2c_ucsi_match[] = {
1455 {"AMDI0042"},
1456 {}
1457 };
1458 MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
1459
1460 static int ucsi_ccg_resume(struct device *dev)
1461 {
1462 struct i2c_client *client = to_i2c_client(dev);
1463 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1464
1465 return ucsi_resume(uc->ucsi);
1466 }
1467
1468 static int ucsi_ccg_runtime_suspend(struct device *dev)
1469 {
1470 return 0;
1471 }
1472
1473 static int ucsi_ccg_runtime_resume(struct device *dev)
1474 {
1475 struct i2c_client *client = to_i2c_client(dev);
1476 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1477
1478 /*
1479 * Firmware versions 3.1.10 and earlier built for NVIDIA have a known issue
1480 * of missing the interrupt when a device is connected for runtime resume.
1481 * Schedule work to call the ISR as a workaround.
1482 */
1483 if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
1484 uc->fw_version <= CCG_OLD_FW_VERSION)
1485 schedule_work(&uc->pm_work);
1486
1487 return 0;
1488 }
1489
1490 static const struct dev_pm_ops ucsi_ccg_pm = {
1491 .resume = ucsi_ccg_resume,
1492 .runtime_suspend = ucsi_ccg_runtime_suspend,
1493 .runtime_resume = ucsi_ccg_runtime_resume,
1494 };
1495
1496 static struct i2c_driver ucsi_ccg_driver = {
1497 .driver = {
1498 .name = "ucsi_ccg",
1499 .pm = &ucsi_ccg_pm,
1500 .dev_groups = ucsi_ccg_groups,
1501 .acpi_match_table = amd_i2c_ucsi_match,
1502 .of_match_table = ucsi_ccg_of_match_table,
1503 },
1504 .probe = ucsi_ccg_probe,
1505 .remove = ucsi_ccg_remove,
1506 .id_table = ucsi_ccg_device_id,
1507 };
1508
1509 module_i2c_driver(ucsi_ccg_driver);
1510
1511 MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
1512 MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
1513 MODULE_LICENSE("GPL v2");
1514