xref: /openbmc/linux/drivers/usb/typec/ucsi/ucsi_ccg.c (revision 24f68eb5bf14a74027946970a18bc902e19d986a)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * UCSI driver for Cypress CCGx Type-C controller
4   *
5   * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6   * Author: Ajay Gupta <ajayg@nvidia.com>
7   *
8   * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9   */
10  #include <linux/acpi.h>
11  #include <linux/delay.h>
12  #include <linux/firmware.h>
13  #include <linux/i2c.h>
14  #include <linux/module.h>
15  #include <linux/pci.h>
16  #include <linux/platform_device.h>
17  #include <linux/pm.h>
18  #include <linux/pm_runtime.h>
19  #include <linux/usb/typec_dp.h>
20  
21  #include <asm/unaligned.h>
22  #include "ucsi.h"
23  
24  enum enum_fw_mode {
25  	BOOT,   /* bootloader */
26  	FW1,    /* FW partition-1 (contains secondary fw) */
27  	FW2,    /* FW partition-2 (contains primary fw) */
28  	FW_INVALID,
29  };
30  
31  #define CCGX_RAB_DEVICE_MODE			0x0000
32  #define CCGX_RAB_INTR_REG			0x0006
33  #define  DEV_INT				BIT(0)
34  #define  PORT0_INT				BIT(1)
35  #define  PORT1_INT				BIT(2)
36  #define  UCSI_READ_INT				BIT(7)
37  #define CCGX_RAB_JUMP_TO_BOOT			0x0007
38  #define  TO_BOOT				'J'
39  #define  TO_ALT_FW				'A'
40  #define CCGX_RAB_RESET_REQ			0x0008
41  #define  RESET_SIG				'R'
42  #define  CMD_RESET_I2C				0x0
43  #define  CMD_RESET_DEV				0x1
44  #define CCGX_RAB_ENTER_FLASHING			0x000A
45  #define  FLASH_ENTER_SIG			'P'
46  #define CCGX_RAB_VALIDATE_FW			0x000B
47  #define CCGX_RAB_FLASH_ROW_RW			0x000C
48  #define  FLASH_SIG				'F'
49  #define  FLASH_RD_CMD				0x0
50  #define  FLASH_WR_CMD				0x1
51  #define  FLASH_FWCT1_WR_CMD			0x2
52  #define  FLASH_FWCT2_WR_CMD			0x3
53  #define  FLASH_FWCT_SIG_WR_CMD			0x4
54  #define CCGX_RAB_READ_ALL_VER			0x0010
55  #define CCGX_RAB_READ_FW2_VER			0x0020
56  #define CCGX_RAB_UCSI_CONTROL			0x0039
57  #define CCGX_RAB_UCSI_CONTROL_START		BIT(0)
58  #define CCGX_RAB_UCSI_CONTROL_STOP		BIT(1)
59  #define CCGX_RAB_UCSI_DATA_BLOCK(offset)	(0xf000 | ((offset) & 0xff))
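/*
 * e.g. a UCSI data structure offset of 0x10 maps to HPI register
 * 0xf010 (0xf000 | (0x10 & 0xff)).
 */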
60  #define REG_FLASH_RW_MEM        0x0200
61  #define DEV_REG_IDX				CCGX_RAB_DEVICE_MODE
62  #define CCGX_RAB_PDPORT_ENABLE			0x002C
63  #define  PDPORT_1		BIT(0)
64  #define  PDPORT_2		BIT(1)
65  #define CCGX_RAB_RESPONSE			0x007E
66  #define  ASYNC_EVENT				BIT(7)
67  
68  /* CCGx events & async msg codes */
69  #define RESET_COMPLETE		0x80
70  #define EVENT_INDEX		RESET_COMPLETE
71  #define PORT_CONNECT_DET	0x84
72  #define PORT_DISCONNECT_DET	0x85
73  #define ROLE_SWAP_COMPLETE	0x87
74  
75  /* ccg firmware */
76  #define CYACD_LINE_SIZE         527
77  #define CCG4_ROW_SIZE           256
78  #define FW1_METADATA_ROW        0x1FF
79  #define FW2_METADATA_ROW        0x1FE
80  #define FW_CFG_TABLE_SIG_SIZE	256
81  
82  static int secondary_fw_min_ver = 41;
83  
84  enum enum_flash_mode {
85  	SECONDARY_BL,	/* update secondary using bootloader */
86  	PRIMARY,	/* update primary using secondary */
87  	SECONDARY,	/* update secondary using primary */
88  	FLASH_NOT_NEEDED,	/* update not required */
89  	FLASH_INVALID,
90  };
91  
92  static const char * const ccg_fw_names[] = {
93  	"ccg_boot.cyacd",
94  	"ccg_primary.cyacd",
95  	"ccg_secondary.cyacd"
96  };
97  
98  struct ccg_dev_info {
99  #define CCG_DEVINFO_FWMODE_SHIFT (0)
100  #define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
101  #define CCG_DEVINFO_PDPORTS_SHIFT (2)
102  #define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
103  	u8 mode;
104  	u8 bl_mode;
105  	__le16 silicon_id;
106  	__le16 bl_last_row;
107  } __packed;
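/*
 * Per the masks above, mode[1:0] holds the active enum_fw_mode and
 * mode[3:2] the PD port configuration (checked in probe to set port_num).
 */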
108  
109  struct version_format {
110  	__le16 build;
111  	u8 patch;
112  	u8 ver;
113  #define CCG_VERSION_PATCH(x) ((x) << 16)
114  #define CCG_VERSION(x)	((x) << 24)
115  #define CCG_VERSION_MIN_SHIFT (0)
116  #define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
117  #define CCG_VERSION_MAJ_SHIFT (4)
118  #define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
119  } __packed;
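/*
 * The 'ver' byte packs major/minor as nibbles (masks above), so e.g.
 * firmware 3.1.10 has ver = 0x31 and patch = 10; get_fw_info() below
 * composes CCG_VERSION(0x31) | CCG_VERSION_PATCH(10) = 0x310a0000.
 */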
120  
121  /*
122   * Firmware version 3.1.10 or earlier built for NVIDIA has a known issue
123   * of missing the interrupt when a device is connected during runtime resume.
124   */
125  #define CCG_FW_BUILD_NVIDIA	(('n' << 8) | 'v')
126  #define CCG_OLD_FW_VERSION	(CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
127  
128  /* Firmware built for NVIDIA Tegra doesn't support the UCSI ALT command
129   * and has a known issue of reporting wrong capability info.
130   */
131  #define CCG_FW_BUILD_NVIDIA_TEGRA	(('g' << 8) | 'n')
132  
133  /* Altmode offset for NVIDIA Function Test Board (FTB) */
134  #define NVIDIA_FTB_DP_OFFSET	(2)
135  #define NVIDIA_FTB_DBG_OFFSET	(3)
136  
137  struct version_info {
138  	struct version_format base;
139  	struct version_format app;
140  };
141  
142  struct fw_config_table {
143  	u32 identity;
144  	u16 table_size;
145  	u8 fwct_version;
146  	u8 is_key_change;
147  	u8 guid[16];
148  	struct version_format base;
149  	struct version_format app;
150  	u8 primary_fw_digest[32];
151  	u32 key_exp_length;
152  	u8 key_modulus[256];
153  	u8 key_exp[4];
154  };
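/*
 * A signed .cyacd image carries this table plus a FW_CFG_TABLE_SIG_SIZE
 * byte signature at its tail; 'identity' holds the "FWCT" magic checked
 * in ccg_check_fw_version() and do_flash().
 */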
155  
156  /* CCGx response codes */
157  enum ccg_resp_code {
158  	CMD_NO_RESP             = 0x00,
159  	CMD_SUCCESS             = 0x02,
160  	FLASH_DATA_AVAILABLE    = 0x03,
161  	CMD_INVALID             = 0x05,
162  	FLASH_UPDATE_FAIL       = 0x07,
163  	INVALID_FW              = 0x08,
164  	INVALID_ARG             = 0x09,
165  	CMD_NOT_SUPPORT         = 0x0A,
166  	TRANSACTION_FAIL        = 0x0C,
167  	PD_CMD_FAIL             = 0x0D,
168  	UNDEF_ERROR             = 0x0F,
169  	INVALID_RESP		= 0x10,
170  };
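/*
 * ccg_send_command() returns the raw response code, so callers compare
 * against the code they expect (e.g. CMD_SUCCESS or RESET_COMPLETE)
 * rather than 0.
 */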
171  
172  #define CCG_EVENT_MAX	(EVENT_INDEX + 43)
173  
174  struct ccg_cmd {
175  	u16 reg;
176  	u32 data;
177  	int len;
178  	u32 delay; /* ms delay for cmd timeout  */
179  };
180  
181  struct ccg_resp {
182  	u8 code;
183  	u8 length;
184  };
185  
186  struct ucsi_ccg_altmode {
187  	u16 svid;
188  	u32 mid;
189  	u8 linked_idx;
190  	u8 active_idx;
191  #define UCSI_MULTI_DP_INDEX	(0xff)
192  	bool checked;
193  } __packed;
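/*
 * linked_idx cross-references the matching entry in the other table
 * (uc->orig vs. uc->updated) or is UCSI_MULTI_DP_INDEX when several DP
 * modes were merged into one entry; active_idx then tracks which of the
 * original DP modes is currently entered.
 */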
194  
195  struct ucsi_ccg {
196  	struct device *dev;
197  	struct ucsi *ucsi;
198  	struct i2c_client *client;
199  
200  	struct ccg_dev_info info;
201  	/* version info for boot, primary and secondary */
202  	struct version_info version[FW2 + 1];
203  	u32 fw_version;
204  	/* CCG HPI communication flags */
205  	unsigned long flags;
206  #define RESET_PENDING	0
207  #define DEV_CMD_PENDING	1
208  	struct ccg_resp dev_resp;
209  	u8 cmd_resp;
210  	int port_num;
211  	int irq;
212  	struct work_struct work;
213  	struct mutex lock; /* to sync between user and driver thread */
214  
215  	/* fw build with vendor information */
216  	u16 fw_build;
217  	struct work_struct pm_work;
218  
219  	struct completion complete;
220  
221  	u64 last_cmd_sent;
222  	bool has_multiple_dp;
223  	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
224  	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
225  };
226  
227  static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
228  {
229  	struct i2c_client *client = uc->client;
230  	const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
231  	unsigned char buf[2];
232  	struct i2c_msg msgs[] = {
233  		{
234  			.addr	= client->addr,
235  			.flags  = 0x0,
236  			.len	= sizeof(buf),
237  			.buf	= buf,
238  		},
239  		{
240  			.addr	= client->addr,
241  			.flags  = I2C_M_RD,
242  			.buf	= data,
243  		},
244  	};
245  	u32 rlen, rem_len = len, max_read_len = len;
246  	int status;
247  
248  	/* check any max_read_len limitation on i2c adapter */
249  	if (quirks && quirks->max_read_len)
250  		max_read_len = quirks->max_read_len;
251  
252  	pm_runtime_get_sync(uc->dev);
253  	while (rem_len > 0) {
254  		msgs[1].buf = &data[len - rem_len];
255  		rlen = min_t(u16, rem_len, max_read_len);
256  		msgs[1].len = rlen;
257  		put_unaligned_le16(rab, buf);
258  		status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
259  		if (status < 0) {
260  			dev_err(uc->dev, "i2c_transfer failed %d\n", status);
261  			pm_runtime_put_sync(uc->dev);
262  			return status;
263  		}
264  		rab += rlen;
265  		rem_len -= rlen;
266  	}
267  
268  	pm_runtime_put_sync(uc->dev);
269  	return 0;
270  }
271  
272  static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
273  {
274  	struct i2c_client *client = uc->client;
275  	unsigned char *buf;
276  	struct i2c_msg msgs[] = {
277  		{
278  			.addr	= client->addr,
279  			.flags  = 0x0,
280  		}
281  	};
282  	int status;
283  
284  	buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
285  	if (!buf)
286  		return -ENOMEM;
287  
288  	put_unaligned_le16(rab, buf);
289  	memcpy(buf + sizeof(rab), data, len);
290  
291  	msgs[0].len = len + sizeof(rab);
292  	msgs[0].buf = buf;
293  
294  	pm_runtime_get_sync(uc->dev);
295  	status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
296  	if (status < 0) {
297  		dev_err(uc->dev, "i2c_transfer failed %d\n", status);
298  		pm_runtime_put_sync(uc->dev);
299  		kfree(buf);
300  		return status;
301  	}
302  
303  	pm_runtime_put_sync(uc->dev);
304  	kfree(buf);
305  	return 0;
306  }
307  
308  static int ucsi_ccg_init(struct ucsi_ccg *uc)
309  {
310  	unsigned int count = 10;
311  	u8 data;
312  	int status;
313  
314  	data = CCGX_RAB_UCSI_CONTROL_STOP;
315  	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
316  	if (status < 0)
317  		return status;
318  
319  	data = CCGX_RAB_UCSI_CONTROL_START;
320  	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
321  	if (status < 0)
322  		return status;
323  
324  	/*
325  	 * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI control
326  	 * register writes above push a response which must be cleared.
327  	 */
328  	do {
329  		status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
330  		if (status < 0)
331  			return status;
332  
333  		if (!(data & DEV_INT))
334  			return 0;
335  
336  		status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
337  		if (status < 0)
338  			return status;
339  
340  		usleep_range(10000, 11000);
341  	} while (--count);
342  
343  	return -ETIMEDOUT;
344  }
345  
346  static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
347  {
348  	u8 cam, new_cam;
349  
350  	cam = data[0];
351  	new_cam = uc->orig[cam].linked_idx;
352  	uc->updated[new_cam].active_idx = cam;
353  	data[0] = new_cam;
354  }
355  
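/*
 * Merge duplicate DP altmodes reported for a connector into a single
 * entry whose MID is the OR of the individual MIDs (e.g. pin assignments
 * D and E collapse into one D|E entry). uc->orig keeps the controller's
 * original list, uc->updated the merged list handed back to the UCSI core.
 */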
356  static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
357  				     struct ucsi_altmode *orig,
358  				     struct ucsi_altmode *updated)
359  {
360  	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
361  	struct ucsi_ccg_altmode *alt, *new_alt;
362  	int i, j, k = 0;
363  	bool found = false;
364  
365  	alt = uc->orig;
366  	new_alt = uc->updated;
367  	memset(uc->updated, 0, sizeof(uc->updated));
368  
369  	/*
370  	 * Copy original connector altmodes to new structure.
371  	 * We need this before second loop since second loop
372  	 * checks for duplicate altmodes.
373  	 */
374  	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
375  		alt[i].svid = orig[i].svid;
376  		alt[i].mid = orig[i].mid;
377  		if (!alt[i].svid)
378  			break;
379  	}
380  
381  	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
382  		if (!alt[i].svid)
383  			break;
384  
385  		/* already checked and considered */
386  		if (alt[i].checked)
387  			continue;
388  
389  		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
390  			/* Found Non DP altmode */
391  			new_alt[k].svid = alt[i].svid;
392  			new_alt[k].mid |= alt[i].mid;
393  			new_alt[k].linked_idx = i;
394  			alt[i].linked_idx = k;
395  			updated[k].svid = new_alt[k].svid;
396  			updated[k].mid = new_alt[k].mid;
397  			k++;
398  			continue;
399  		}
400  
401  		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
402  			if (alt[i].svid != alt[j].svid ||
403  			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
404  				continue;
405  			} else {
406  				/* Found duplicate DP mode */
407  				new_alt[k].svid = alt[i].svid;
408  				new_alt[k].mid |= alt[i].mid | alt[j].mid;
409  				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
410  				alt[i].linked_idx = k;
411  				alt[j].linked_idx = k;
412  				alt[j].checked = true;
413  				found = true;
414  			}
415  		}
416  		if (found) {
417  			uc->has_multiple_dp = true;
418  		} else {
419  			/* Didn't find any duplicate DP altmode */
420  			new_alt[k].svid = alt[i].svid;
421  			new_alt[k].mid |= alt[i].mid;
422  			new_alt[k].linked_idx = i;
423  			alt[i].linked_idx = k;
424  		}
425  		updated[k].svid = new_alt[k].svid;
426  		updated[k].mid = new_alt[k].mid;
427  		k++;
428  	}
429  	return found;
430  }
431  
432  static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
433  					    struct ucsi_connector *con,
434  					    u64 *cmd)
435  {
436  	struct ucsi_ccg_altmode *new_port, *port;
437  	struct typec_altmode *alt = NULL;
438  	u8 new_cam, cam, pin;
439  	bool enter_new_mode;
440  	int i, j, k = 0xff;
441  
442  	port = uc->orig;
443  	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
444  	if (new_cam >= ARRAY_SIZE(uc->updated))
445  		return;
446  	new_port = &uc->updated[new_cam];
447  	cam = new_port->linked_idx;
448  	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
449  
450  	/*
451  	 * If CAM is UCSI_MULTI_DP_INDEX then this is a DP altmode
452  	 * with multiple DP modes. Find the CAM with the best pin assignment
453  	 * among all DP modes. Prioritize pins E->D->C after making sure
454  	 * the partner supports that pin.
455  	 */
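	/*
	 * The numeric compare below does the prioritization: pins E, D and C
	 * occupy successively lower bits of the pin-assignment mask.
	 */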
456  	if (cam == UCSI_MULTI_DP_INDEX) {
457  		if (enter_new_mode) {
458  			for (i = 0; con->partner_altmode[i]; i++) {
459  				alt = con->partner_altmode[i];
460  				if (alt->svid == new_port->svid)
461  					break;
462  			}
463  			/*
464  			 * alt will always be non-NULL since this is a
465  			 * UCSI_SET_NEW_CAM command, so there will be
466  			 * at least one con->partner_altmode[i] with an svid
467  			 * matching new_port->svid.
468  			 */
469  			for (j = 0; port[j].svid; j++) {
470  				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
471  				if (alt && port[j].svid == alt->svid &&
472  				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
473  					/* prioritize pin E->D->C */
474  					if (k == 0xff || (k != 0xff && pin >
475  					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
476  					    ) {
477  						k = j;
478  					}
479  				}
480  			}
481  			cam = k;
482  			new_port->active_idx = cam;
483  		} else {
484  			cam = new_port->active_idx;
485  		}
486  	}
487  	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
488  	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
489  }
490  
491  /*
492   * Change the order of VDO values of the NVIDIA test device FTB
493   * (Function Test Board), which reports its altmode list with vdo=0x3
494   * first and then vdo=0x1. The current logic assigns mode values based
495   * on the order in the altmode list, which causes a mismatch of CON
496   * and SOP altmodes since the NVIDIA GPU connector has vdo=0x1
497   * first and then vdo=0x3.
498   */
499  static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
500  				    struct ucsi_altmode *alt)
501  {
502  	switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
503  	case NVIDIA_FTB_DP_OFFSET:
504  		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
505  			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
506  				DP_CAP_DP_SIGNALING | DP_CAP_USB |
507  				DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
508  		break;
509  	case NVIDIA_FTB_DBG_OFFSET:
510  		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
511  			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
512  		break;
513  	default:
514  		break;
515  	}
516  }
517  
518  static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
519  			 void *val, size_t val_len)
520  {
521  	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
522  	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
523  	struct ucsi_capability *cap;
524  	struct ucsi_altmode *alt;
525  	int ret;
526  
527  	ret = ccg_read(uc, reg, val, val_len);
528  	if (ret)
529  		return ret;
530  
531  	if (offset != UCSI_MESSAGE_IN)
532  		return ret;
533  
534  	switch (UCSI_COMMAND(uc->last_cmd_sent)) {
535  	case UCSI_GET_CURRENT_CAM:
536  		if (uc->has_multiple_dp)
537  			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
538  		break;
539  	case UCSI_GET_ALTERNATE_MODES:
540  		if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
541  		    UCSI_RECIPIENT_SOP) {
542  			alt = val;
543  			if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
544  				ucsi_ccg_nvidia_altmode(uc, alt);
545  		}
546  		break;
547  	case UCSI_GET_CAPABILITY:
548  		if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
549  			cap = val;
550  			cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
551  		}
552  		break;
553  	default:
554  		break;
555  	}
556  	uc->last_cmd_sent = 0;
557  
558  	return ret;
559  }
560  
561  static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
562  				const void *val, size_t val_len)
563  {
564  	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
565  
566  	return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
567  }
568  
569  static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
570  			       const void *val, size_t val_len)
571  {
572  	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
573  	struct ucsi_connector *con;
574  	int con_index;
575  	int ret;
576  
577  	mutex_lock(&uc->lock);
578  	pm_runtime_get_sync(uc->dev);
579  	set_bit(DEV_CMD_PENDING, &uc->flags);
580  
581  	if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
582  		uc->last_cmd_sent = *(u64 *)val;
583  
584  		if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
585  		    uc->has_multiple_dp) {
586  			con_index = (uc->last_cmd_sent >> 16) &
587  				    UCSI_CMD_CONNECTOR_MASK;
588  			con = &uc->ucsi->connector[con_index - 1];
589  			ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
590  		}
591  	}
592  
593  	ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
594  	if (ret)
595  		goto err_clear_bit;
596  
597  	if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
598  		ret = -ETIMEDOUT;
599  
600  err_clear_bit:
601  	clear_bit(DEV_CMD_PENDING, &uc->flags);
602  	pm_runtime_put_sync(uc->dev);
603  	mutex_unlock(&uc->lock);
604  
605  	return ret;
606  }
607  
608  static const struct ucsi_operations ucsi_ccg_ops = {
609  	.read = ucsi_ccg_read,
610  	.sync_write = ucsi_ccg_sync_write,
611  	.async_write = ucsi_ccg_async_write,
612  	.update_altmodes = ucsi_ccg_update_altmodes
613  };
614  
615  static irqreturn_t ccg_irq_handler(int irq, void *data)
616  {
617  	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
618  	struct ucsi_ccg *uc = data;
619  	u8 intr_reg;
620  	u32 cci;
621  	int ret;
622  
623  	ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
624  	if (ret)
625  		return IRQ_NONE;
626  
627  	ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
628  	if (ret)
629  		goto err_clear_irq;
630  
631  	if (UCSI_CCI_CONNECTOR(cci))
632  		ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
633  
634  	if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
635  	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
636  		complete(&uc->complete);
637  
638  err_clear_irq:
639  	ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
640  
641  	return IRQ_HANDLED;
642  }
643  
644  static int ccg_request_irq(struct ucsi_ccg *uc)
645  {
646  	unsigned long flags = IRQF_ONESHOT;
647  
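	/*
	 * Without a fwnode there is no ACPI/OF description of the interrupt
	 * trigger, so fall back to a level-high trigger here.
	 */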
648  	if (!dev_fwnode(uc->dev))
649  		flags |= IRQF_TRIGGER_HIGH;
650  
651  	return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
652  }
653  
654  static void ccg_pm_workaround_work(struct work_struct *pm_work)
655  {
656  	ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
657  }
658  
659  static int get_fw_info(struct ucsi_ccg *uc)
660  {
661  	int err;
662  
663  	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
664  		       sizeof(uc->version));
665  	if (err < 0)
666  		return err;
667  
668  	uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
669  			CCG_VERSION_PATCH(uc->version[FW2].app.patch);
670  
671  	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
672  		       sizeof(uc->info));
673  	if (err < 0)
674  		return err;
675  
676  	return 0;
677  }
678  
679  static inline bool invalid_async_evt(int code)
680  {
681  	return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
682  }
683  
684  static void ccg_process_response(struct ucsi_ccg *uc)
685  {
686  	struct device *dev = uc->dev;
687  
688  	if (uc->dev_resp.code & ASYNC_EVENT) {
689  		if (uc->dev_resp.code == RESET_COMPLETE) {
690  			if (test_bit(RESET_PENDING, &uc->flags))
691  				uc->cmd_resp = uc->dev_resp.code;
692  			get_fw_info(uc);
693  		}
694  		if (invalid_async_evt(uc->dev_resp.code))
695  			dev_err(dev, "invalid async evt %d\n",
696  				uc->dev_resp.code);
697  	} else {
698  		if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
699  			uc->cmd_resp = uc->dev_resp.code;
700  			clear_bit(DEV_CMD_PENDING, &uc->flags);
701  		} else {
702  			dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
703  				uc->dev_resp.code);
704  		}
705  	}
706  }
707  
708  static int ccg_read_response(struct ucsi_ccg *uc)
709  {
710  	unsigned long target = jiffies + msecs_to_jiffies(1000);
711  	struct device *dev = uc->dev;
712  	u8 intval;
713  	int status;
714  
715  	/* wait for interrupt status to get updated */
716  	do {
717  		status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
718  				  sizeof(intval));
719  		if (status < 0)
720  			return status;
721  
722  		if (intval & DEV_INT)
723  			break;
724  		usleep_range(500, 600);
725  	} while (time_is_after_jiffies(target));
726  
727  	if (time_is_before_jiffies(target)) {
728  		dev_err(dev, "response timeout error\n");
729  		return -ETIME;
730  	}
731  
732  	status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
733  			  sizeof(uc->dev_resp));
734  	if (status < 0)
735  		return status;
736  
737  	status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
738  	if (status < 0)
739  		return status;
740  
741  	return 0;
742  }
743  
744  /* Caller must hold uc->lock */
745  static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
746  {
747  	struct device *dev = uc->dev;
748  	int ret;
749  
750  	switch (cmd->reg & 0xF000) {
751  	case DEV_REG_IDX:
752  		set_bit(DEV_CMD_PENDING, &uc->flags);
753  		break;
754  	default:
755  		dev_err(dev, "invalid cmd register\n");
756  		break;
757  	}
758  
759  	ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
760  	if (ret < 0)
761  		return ret;
762  
763  	msleep(cmd->delay);
764  
765  	ret = ccg_read_response(uc);
766  	if (ret < 0) {
767  		dev_err(dev, "response read error\n");
768  		switch (cmd->reg & 0xF000) {
769  		case DEV_REG_IDX:
770  			clear_bit(DEV_CMD_PENDING, &uc->flags);
771  			break;
772  		default:
773  			dev_err(dev, "invalid cmd register\n");
774  			break;
775  		}
776  		return -EIO;
777  	}
778  	ccg_process_response(uc);
779  
780  	return uc->cmd_resp;
781  }
782  
783  static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
784  {
785  	struct ccg_cmd cmd;
786  	int ret;
787  
788  	cmd.reg = CCGX_RAB_ENTER_FLASHING;
789  	cmd.data = FLASH_ENTER_SIG;
790  	cmd.len = 1;
791  	cmd.delay = 50;
792  
793  	mutex_lock(&uc->lock);
794  
795  	ret = ccg_send_command(uc, &cmd);
796  
797  	mutex_unlock(&uc->lock);
798  
799  	if (ret != CMD_SUCCESS) {
800  		dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
801  		return ret;
802  	}
803  
804  	return 0;
805  }
806  
807  static int ccg_cmd_reset(struct ucsi_ccg *uc)
808  {
809  	struct ccg_cmd cmd;
810  	u8 *p;
811  	int ret;
812  
813  	p = (u8 *)&cmd.data;
814  	cmd.reg = CCGX_RAB_RESET_REQ;
815  	p[0] = RESET_SIG;
816  	p[1] = CMD_RESET_DEV;
817  	cmd.len = 2;
818  	cmd.delay = 5000;
819  
820  	mutex_lock(&uc->lock);
821  
822  	set_bit(RESET_PENDING, &uc->flags);
823  
824  	ret = ccg_send_command(uc, &cmd);
825  	if (ret != RESET_COMPLETE)
826  		goto err_clear_flag;
827  
828  	ret = 0;
829  
830  err_clear_flag:
831  	clear_bit(RESET_PENDING, &uc->flags);
832  
833  	mutex_unlock(&uc->lock);
834  
835  	return ret;
836  }
837  
838  static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
839  {
840  	struct ccg_cmd cmd;
841  	int ret;
842  
843  	cmd.reg = CCGX_RAB_PDPORT_ENABLE;
844  	if (enable)
845  		cmd.data = (uc->port_num == 1) ?
846  			    PDPORT_1 : (PDPORT_1 | PDPORT_2);
847  	else
848  		cmd.data = 0x0;
849  	cmd.len = 1;
850  	cmd.delay = 10;
851  
852  	mutex_lock(&uc->lock);
853  
854  	ret = ccg_send_command(uc, &cmd);
855  
856  	mutex_unlock(&uc->lock);
857  
858  	if (ret != CMD_SUCCESS) {
859  		dev_err(uc->dev, "port control failed ret=%d\n", ret);
860  		return ret;
861  	}
862  	return 0;
863  }
864  
865  static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
866  {
867  	struct ccg_cmd cmd;
868  	int ret;
869  
870  	cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
871  
872  	if (bl_mode)
873  		cmd.data = TO_BOOT;
874  	else
875  		cmd.data = TO_ALT_FW;
876  
877  	cmd.len = 1;
878  	cmd.delay = 100;
879  
880  	mutex_lock(&uc->lock);
881  
882  	set_bit(RESET_PENDING, &uc->flags);
883  
884  	ret = ccg_send_command(uc, &cmd);
885  	if (ret != RESET_COMPLETE)
886  		goto err_clear_flag;
887  
888  	ret = 0;
889  
890  err_clear_flag:
891  	clear_bit(RESET_PENDING, &uc->flags);
892  
893  	mutex_unlock(&uc->lock);
894  
895  	return ret;
896  }
897  
898  static int
899  ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
900  			const void *data, u8 fcmd)
901  {
902  	struct i2c_client *client = uc->client;
903  	struct ccg_cmd cmd;
904  	u8 buf[CCG4_ROW_SIZE + 2];
905  	u8 *p;
906  	int ret;
907  
908  	/* Copy the data into the flash read/write memory. */
909  	put_unaligned_le16(REG_FLASH_RW_MEM, buf);
910  
911  	memcpy(buf + 2, data, CCG4_ROW_SIZE);
912  
913  	mutex_lock(&uc->lock);
914  
915  	ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
916  	if (ret != CCG4_ROW_SIZE + 2) {
917  		dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
918  		mutex_unlock(&uc->lock);
919  		return ret < 0 ? ret : -EIO;
920  	}
921  
922  	/* Use the FLASH_ROW_READ_WRITE register to trigger */
923  	/* writing of data to the desired flash row */
924  	p = (u8 *)&cmd.data;
925  	cmd.reg = CCGX_RAB_FLASH_ROW_RW;
926  	p[0] = FLASH_SIG;
927  	p[1] = fcmd;
928  	put_unaligned_le16(row, &p[2]);
929  	cmd.len = 4;
930  	cmd.delay = 50;
931  	if (fcmd == FLASH_FWCT_SIG_WR_CMD)
932  		cmd.delay += 400;
933  	if (row == 510)
934  		cmd.delay += 220;
935  	ret = ccg_send_command(uc, &cmd);
936  
937  	mutex_unlock(&uc->lock);
938  
939  	if (ret != CMD_SUCCESS) {
940  		dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
941  		return ret;
942  	}
943  
944  	return 0;
945  }
946  
947  static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
948  {
949  	struct ccg_cmd cmd;
950  	int ret;
951  
952  	cmd.reg = CCGX_RAB_VALIDATE_FW;
953  	cmd.data = fwid;
954  	cmd.len = 1;
955  	cmd.delay = 500;
956  
957  	mutex_lock(&uc->lock);
958  
959  	ret = ccg_send_command(uc, &cmd);
960  
961  	mutex_unlock(&uc->lock);
962  
963  	if (ret != CMD_SUCCESS)
964  		return ret;
965  
966  	return 0;
967  }
968  
969  static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
970  				     struct version_format *app,
971  				     struct fw_config_table *fw_cfg)
972  {
973  	struct device *dev = uc->dev;
974  
975  	/* Check if the fw build is for supported vendors */
976  	if (le16_to_cpu(app->build) != uc->fw_build) {
977  		dev_info(dev, "current fw is not from supported vendor\n");
978  		return false;
979  	}
980  
981  	/* Check if the new fw build is for supported vendors */
982  	if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
983  		dev_info(dev, "new fw is not from supported vendor\n");
984  		return false;
985  	}
986  	return true;
987  }
988  
989  static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
990  				 struct version_format *app)
991  {
992  	const struct firmware *fw = NULL;
993  	struct device *dev = uc->dev;
994  	struct fw_config_table fw_cfg;
995  	u32 cur_version, new_version;
996  	bool is_later = false;
997  
998  	if (request_firmware(&fw, fw_name, dev) != 0) {
999  		dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
1000  		return false;
1001  	}
1002  
1003  	/*
1004  	 * check if signed fw
1005  	 * last part of fw image is fw cfg table and signature
1006  	 */
1007  	if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
1008  		goto out_release_firmware;
1009  
1010  	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1011  	       sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));
1012  
1013  	if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) {
1014  		dev_info(dev, "not a signed image\n");
1015  		goto out_release_firmware;
1016  	}
1017  
1018  	/* compare input version with FWCT version */
1019  	cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
1020  			CCG_VERSION(app->ver);
1021  
1022  	new_version = le16_to_cpu(fw_cfg.app.build) |
1023  			CCG_VERSION_PATCH(fw_cfg.app.patch) |
1024  			CCG_VERSION(fw_cfg.app.ver);
1025  
1026  	if (!ccg_check_vendor_version(uc, app, &fw_cfg))
1027  		goto out_release_firmware;
1028  
1029  	if (new_version > cur_version)
1030  		is_later = true;
1031  
1032  out_release_firmware:
1033  	release_firmware(fw);
1034  	return is_later;
1035  }
1036  
1037  static int ccg_fw_update_needed(struct ucsi_ccg *uc,
1038  				enum enum_flash_mode *mode)
1039  {
1040  	struct device *dev = uc->dev;
1041  	int err;
1042  	struct version_info version[3];
1043  
1044  	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
1045  		       sizeof(uc->info));
1046  	if (err) {
1047  		dev_err(dev, "read device mode failed\n");
1048  		return err;
1049  	}
1050  
1051  	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
1052  		       sizeof(version));
1053  	if (err) {
1054  		dev_err(dev, "read device mode failed\n");
1055  		return err;
1056  	}
1057  
1058  	if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
1059  		   sizeof(struct version_info)) == 0) {
1060  		dev_info(dev, "secondary fw is not flashed\n");
1061  		*mode = SECONDARY_BL;
1062  	} else if (le16_to_cpu(version[FW1].base.build) <
1063  		secondary_fw_min_ver) {
1064  		dev_info(dev, "secondary fw version is too low (< %d)\n",
1065  			 secondary_fw_min_ver);
1066  		*mode = SECONDARY;
1067  	} else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
1068  		   sizeof(struct version_info)) == 0) {
1069  		dev_info(dev, "primary fw is not flashed\n");
1070  		*mode = PRIMARY;
1071  	} else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
1072  		   &version[FW2].app)) {
1073  		dev_info(dev, "found primary fw with later version\n");
1074  		*mode = PRIMARY;
1075  	} else {
1076  		dev_info(dev, "secondary and primary fw are the latest\n");
1077  		*mode = FLASH_NOT_NEEDED;
1078  	}
1079  	return 0;
1080  }
1081  
1082  static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
1083  {
1084  	struct device *dev = uc->dev;
1085  	const struct firmware *fw = NULL;
1086  	const char *p, *s;
1087  	const char *eof;
1088  	int err, row, len, line_sz, line_cnt = 0;
1089  	unsigned long start_time = jiffies;
1090  	struct fw_config_table  fw_cfg;
1091  	u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
1092  	u8 *wr_buf;
1093  
1094  	err = request_firmware(&fw, ccg_fw_names[mode], dev);
1095  	if (err) {
1096  		dev_err(dev, "request %s failed err=%d\n",
1097  			ccg_fw_names[mode], err);
1098  		return err;
1099  	}
1100  
1101  	if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
1102  			CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
1103  		err = ccg_cmd_port_control(uc, false);
1104  		if (err < 0)
1105  			goto release_fw;
1106  		err = ccg_cmd_jump_boot_mode(uc, 0);
1107  		if (err < 0)
1108  			goto release_fw;
1109  	}
1110  
1111  	eof = fw->data + fw->size;
1112  
1113  	/*
1114  	 * check if signed fw
1115  	 * last part of fw image is fw cfg table and signature
1116  	 */
1117  	if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
1118  		goto not_signed_fw;
1119  
1120  	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1121  	       sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));
1122  
1123  	if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
1124  		dev_info(dev, "not a signed image\n");
1125  		goto not_signed_fw;
1126  	}
1127  	eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);
1128  
1129  	memcpy((uint8_t *)&fw_cfg_sig,
1130  	       fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));
1131  
1132  	/* flash fw config table and signature first */
1133  	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
1134  				      FLASH_FWCT1_WR_CMD);
1135  	if (err)
1136  		goto release_fw;
1137  
1138  	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
1139  				      FLASH_FWCT2_WR_CMD);
1140  	if (err)
1141  		goto release_fw;
1142  
1143  	err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
1144  				      FLASH_FWCT_SIG_WR_CMD);
1145  	if (err)
1146  		goto release_fw;
1147  
1148  not_signed_fw:
1149  	wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
1150  	if (!wr_buf) {
1151  		err = -ENOMEM;
1152  		goto release_fw;
1153  	}
1154  
1155  	err = ccg_cmd_enter_flashing(uc);
1156  	if (err)
1157  		goto release_mem;
1158  
1159  	/*****************************************************************
1160  	 * CCG firmware image (.cyacd) file line format
1161  	 *
1162  	 * :00rrrrllll[dd....]cc\r\n
1163  	 *
1164  	 * :00   header
1165  	 * rrrr is row number to flash				(4 char)
1166  	 * llll is data len to flash				(4 char)
1167  	 * dd   is a data field representing one byte of data	(512 char)
1168  	 * cc   is checksum					(2 char)
1169  	 * \r\n newline
1170  	 *
1171  	 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
1172  	 *
1173  	 *****************************************************************/
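	/*
	 * e.g. a (hypothetical) line ":0001ff0100<512 hex chars>a7" flashes
	 * 256 bytes to row 0x01ff; hex2bin() below converts rrrr, llll and
	 * the data in one pass, then row/len are extracted big-endian.
	 */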
1174  
1175  	p = strnchr(fw->data, fw->size, ':');
1176  	while (p < eof) {
1177  		s = strnchr(p + 1, eof - p - 1, ':');
1178  
1179  		if (!s)
1180  			s = eof;
1181  
1182  		line_sz = s - p;
1183  
1184  		if (line_sz != CYACD_LINE_SIZE) {
1185  			dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
1186  			err =  -EINVAL;
1187  			goto release_mem;
1188  		}
1189  
1190  		if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
1191  			err =  -EINVAL;
1192  			goto release_mem;
1193  		}
1194  
1195  		row = get_unaligned_be16(wr_buf);
1196  		len = get_unaligned_be16(&wr_buf[2]);
1197  
1198  		if (len != CCG4_ROW_SIZE) {
1199  			err =  -EINVAL;
1200  			goto release_mem;
1201  		}
1202  
1203  		err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
1204  					      FLASH_WR_CMD);
1205  		if (err)
1206  			goto release_mem;
1207  
1208  		line_cnt++;
1209  		p = s;
1210  	}
1211  
1212  	dev_info(dev, "total %d row flashed. time: %dms\n",
1213  		 line_cnt, jiffies_to_msecs(jiffies - start_time));
1214  
1215  	err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 :  FW1);
1216  	if (err)
1217  		dev_err(dev, "%s validation failed err=%d\n",
1218  			(mode == PRIMARY) ? "FW2" :  "FW1", err);
1219  	else
1220  		dev_info(dev, "%s validated\n",
1221  			 (mode == PRIMARY) ? "FW2" :  "FW1");
1222  
1223  	err = ccg_cmd_port_control(uc, false);
1224  	if (err < 0)
1225  		goto release_mem;
1226  
1227  	err = ccg_cmd_reset(uc);
1228  	if (err < 0)
1229  		goto release_mem;
1230  
1231  	err = ccg_cmd_port_control(uc, true);
1232  	if (err < 0)
1233  		goto release_mem;
1234  
1235  release_mem:
1236  	kfree(wr_buf);
1237  
1238  release_fw:
1239  	release_firmware(fw);
1240  	return err;
1241  }
1242  
1243  /*******************************************************************************
1244   * CCG4 has two copies of the firmware in addition to the bootloader.
1245   * If the device is running FW1, FW2 can be updated with the new version.
1246   * Dual firmware mode allows the CCG device to stay in a PD contract and support
1247   * USB PD and Type-C functionality while a firmware update is in progress.
1248   ******************************************************************************/
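/*
 * The loop below therefore flashes whichever image ccg_fw_update_needed()
 * reports as stale (do_flash() ends with a device reset) and re-evaluates
 * until it reports FLASH_NOT_NEEDED.
 */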
1249  static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1250  {
1251  	int err = 0;
1252  
1253  	while (flash_mode != FLASH_NOT_NEEDED) {
1254  		err = do_flash(uc, flash_mode);
1255  		if (err < 0)
1256  			return err;
1257  		err = ccg_fw_update_needed(uc, &flash_mode);
1258  		if (err < 0)
1259  			return err;
1260  	}
1261  	dev_info(uc->dev, "CCG FW update successful\n");
1262  
1263  	return err;
1264  }
1265  
1266  static int ccg_restart(struct ucsi_ccg *uc)
1267  {
1268  	struct device *dev = uc->dev;
1269  	int status;
1270  
1271  	status = ucsi_ccg_init(uc);
1272  	if (status < 0) {
1273  		dev_err(dev, "ucsi_ccg_init failed, err=%d\n", status);
1274  		return status;
1275  	}
1276  
1277  	status = ccg_request_irq(uc);
1278  	if (status < 0) {
1279  		dev_err(dev, "request_threaded_irq failed - %d\n", status);
1280  		return status;
1281  	}
1282  
1283  	status = ucsi_register(uc->ucsi);
1284  	if (status) {
1285  		dev_err(uc->dev, "failed to register the interface\n");
1286  		return status;
1287  	}
1288  
1289  	pm_runtime_enable(uc->dev);
1290  	return 0;
1291  }
1292  
1293  static void ccg_update_firmware(struct work_struct *work)
1294  {
1295  	struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
1296  	enum enum_flash_mode flash_mode;
1297  	int status;
1298  
1299  	status = ccg_fw_update_needed(uc, &flash_mode);
1300  	if (status < 0)
1301  		return;
1302  
1303  	if (flash_mode != FLASH_NOT_NEEDED) {
1304  		ucsi_unregister(uc->ucsi);
1305  		pm_runtime_disable(uc->dev);
1306  		free_irq(uc->irq, uc);
1307  
1308  		ccg_fw_update(uc, flash_mode);
1309  		ccg_restart(uc);
1310  	}
1311  }
1312  
1313  static ssize_t do_flash_store(struct device *dev,
1314  			      struct device_attribute *attr,
1315  			      const char *buf, size_t n)
1316  {
1317  	struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1318  	bool flash;
1319  
1320  	if (kstrtobool(buf, &flash))
1321  		return -EINVAL;
1322  
1323  	if (!flash)
1324  		return n;
1325  
1326  	if (uc->fw_build == 0x0) {
1327  		dev_err(dev, "fail to flash FW due to missing FW build info\n");
1328  		return -EINVAL;
1329  	}
1330  
1331  	schedule_work(&uc->work);
1332  	return n;
1333  }
1334  
1335  static DEVICE_ATTR_WO(do_flash);
1336  
1337  static struct attribute *ucsi_ccg_attrs[] = {
1338  	&dev_attr_do_flash.attr,
1339  	NULL,
1340  };
1341  ATTRIBUTE_GROUPS(ucsi_ccg);
1342  
1343  static int ucsi_ccg_probe(struct i2c_client *client)
1344  {
1345  	struct device *dev = &client->dev;
1346  	struct ucsi_ccg *uc;
1347  	const char *fw_name;
1348  	int status;
1349  
1350  	uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
1351  	if (!uc)
1352  		return -ENOMEM;
1353  
1354  	uc->dev = dev;
1355  	uc->client = client;
1356  	uc->irq = client->irq;
1357  	mutex_init(&uc->lock);
1358  	init_completion(&uc->complete);
1359  	INIT_WORK(&uc->work, ccg_update_firmware);
1360  	INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
1361  
1362  	/* Only fail FW flashing when FW build information is not provided */
1363  	status = device_property_read_string(dev, "firmware-name", &fw_name);
1364  	if (!status) {
1365  		if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
1366  			uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
1367  		else if (!strcmp(fw_name, "nvidia,gpu"))
1368  			uc->fw_build = CCG_FW_BUILD_NVIDIA;
1369  	}
1370  
1371  	if (!uc->fw_build)
1372  		dev_err(uc->dev, "failed to get FW build information\n");
1373  
1374  	/* reset ccg device and initialize ucsi */
1375  	status = ucsi_ccg_init(uc);
1376  	if (status < 0) {
1377  		dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
1378  		return status;
1379  	}
1380  
1381  	status = get_fw_info(uc);
1382  	if (status < 0) {
1383  		dev_err(uc->dev, "get_fw_info failed - %d\n", status);
1384  		return status;
1385  	}
1386  
1387  	uc->port_num = 1;
1388  
1389  	if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
1390  		uc->port_num++;
1391  
1392  	uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
1393  	if (IS_ERR(uc->ucsi))
1394  		return PTR_ERR(uc->ucsi);
1395  
1396  	ucsi_set_drvdata(uc->ucsi, uc);
1397  
1398  	status = ccg_request_irq(uc);
1399  	if (status < 0) {
1400  		dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
1401  		goto out_ucsi_destroy;
1402  	}
1403  
1404  	status = ucsi_register(uc->ucsi);
1405  	if (status)
1406  		goto out_free_irq;
1407  
1408  	i2c_set_clientdata(client, uc);
1409  
1410  	pm_runtime_set_active(uc->dev);
1411  	pm_runtime_enable(uc->dev);
1412  	pm_runtime_use_autosuspend(uc->dev);
1413  	pm_runtime_set_autosuspend_delay(uc->dev, 5000);
1414  	pm_runtime_idle(uc->dev);
1415  
1416  	return 0;
1417  
1418  out_free_irq:
1419  	free_irq(uc->irq, uc);
1420  out_ucsi_destroy:
1421  	ucsi_destroy(uc->ucsi);
1422  
1423  	return status;
1424  }
1425  
1426  static void ucsi_ccg_remove(struct i2c_client *client)
1427  {
1428  	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1429  
1430  	cancel_work_sync(&uc->pm_work);
1431  	cancel_work_sync(&uc->work);
1432  	pm_runtime_disable(uc->dev);
1433  	ucsi_unregister(uc->ucsi);
1434  	ucsi_destroy(uc->ucsi);
1435  	free_irq(uc->irq, uc);
1436  }
1437  
1438  static const struct of_device_id ucsi_ccg_of_match_table[] = {
1439  		{ .compatible = "cypress,cypd4226", },
1440  		{ /* sentinel */ }
1441  };
1442  MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);
1443  
1444  static const struct i2c_device_id ucsi_ccg_device_id[] = {
1445  	{"ccgx-ucsi", 0},
1446  	{}
1447  };
1448  MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
1449  
1450  static const struct acpi_device_id amd_i2c_ucsi_match[] = {
1451  	{"AMDI0042"},
1452  	{}
1453  };
1454  MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
1455  
1456  static int ucsi_ccg_resume(struct device *dev)
1457  {
1458  	struct i2c_client *client = to_i2c_client(dev);
1459  	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1460  
1461  	return ucsi_resume(uc->ucsi);
1462  }
1463  
1464  static int ucsi_ccg_runtime_suspend(struct device *dev)
1465  {
1466  	return 0;
1467  }
1468  
1469  static int ucsi_ccg_runtime_resume(struct device *dev)
1470  {
1471  	struct i2c_client *client = to_i2c_client(dev);
1472  	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1473  
1474  	/*
1475  	 * Firmware version 3.1.10 or earlier built for NVIDIA has a known issue
1476  	 * of missing the interrupt when a device is connected during runtime resume.
1477  	 * Schedule work to call the ISR as a workaround.
1478  	 */
1479  	if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
1480  	    uc->fw_version <= CCG_OLD_FW_VERSION)
1481  		schedule_work(&uc->pm_work);
1482  
1483  	return 0;
1484  }
1485  
1486  static const struct dev_pm_ops ucsi_ccg_pm = {
1487  	.resume = ucsi_ccg_resume,
1488  	.runtime_suspend = ucsi_ccg_runtime_suspend,
1489  	.runtime_resume = ucsi_ccg_runtime_resume,
1490  };
1491  
1492  static struct i2c_driver ucsi_ccg_driver = {
1493  	.driver = {
1494  		.name = "ucsi_ccg",
1495  		.pm = &ucsi_ccg_pm,
1496  		.dev_groups = ucsi_ccg_groups,
1497  		.acpi_match_table = amd_i2c_ucsi_match,
1498  		.of_match_table = ucsi_ccg_of_match_table,
1499  	},
1500  	.probe = ucsi_ccg_probe,
1501  	.remove = ucsi_ccg_remove,
1502  	.id_table = ucsi_ccg_device_id,
1503  };
1504  
1505  module_i2c_driver(ucsi_ccg_driver);
1506  
1507  MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
1508  MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
1509  MODULE_LICENSE("GPL v2");
1510