// SPDX-License-Identifier: GPL-2.0-only
//
// rt712-sdca-sdw.c -- rt712 SDCA ALSA SoC audio driver
//
// Copyright(c) 2023 Realtek Semiconductor Corp.
//
//

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include "rt712-sdca.h"
#include "rt712-sdca-sdw.h"

static bool rt712_sdca_readable_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x201a ... 0x201f:
	case 0x2029 ... 0x202a:
	case 0x202d ... 0x2034:
	case 0x2230 ... 0x2232:
	case 0x2f01 ... 0x2f0a:
	case 0x2f35 ... 0x2f36:
	case 0x2f50:
	case 0x2f54:
	case 0x2f58 ... 0x2f5d:
	case 0x3201:
	case 0x320c:
	case 0x3301 ... 0x3303:
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_GE49, RT712_SDCA_CTL_SELECTED_MODE, 0):
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_GE49, RT712_SDCA_CTL_DETECTED_MODE, 0):
	case SDW_SDCA_CTL(FUNC_NUM_HID, RT712_SDCA_ENT_HID01, RT712_SDCA_CTL_HIDTX_CURRENT_OWNER, 0) ...
		SDW_SDCA_CTL(FUNC_NUM_HID, RT712_SDCA_ENT_HID01, RT712_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0):
	case RT712_BUF_ADDR_HID1 ... RT712_BUF_ADDR_HID2:
		return true;
	default:
		return false;
	}
}

static bool rt712_sdca_volatile_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x201b:
	case 0x201c:
	case 0x201d:
	case 0x201f:
	case 0x202d ... 0x202f:
	case 0x2230:
	case 0x2f01:
	case 0x2f35:
	case 0x320c:
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_GE49, RT712_SDCA_CTL_DETECTED_MODE, 0):
	case SDW_SDCA_CTL(FUNC_NUM_HID, RT712_SDCA_ENT_HID01, RT712_SDCA_CTL_HIDTX_CURRENT_OWNER, 0) ...
		SDW_SDCA_CTL(FUNC_NUM_HID, RT712_SDCA_ENT_HID01, RT712_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0):
	case RT712_BUF_ADDR_HID1 ... RT712_BUF_ADDR_HID2:
		return true;
	default:
		return false;
	}
}

static bool rt712_sdca_mbq_readable_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x2000000 ... 0x200008e:
	case 0x5300000 ... 0x530000e:
	case 0x5400000 ... 0x540000e:
	case 0x5600000 ... 0x5600008:
	case 0x5700000 ... 0x570000d:
	case 0x5800000 ... 0x5800021:
	case 0x5900000 ... 0x5900028:
	case 0x5a00000 ... 0x5a00009:
	case 0x5b00000 ... 0x5b00051:
	case 0x5c00000 ... 0x5c0009a:
	case 0x5d00000 ... 0x5d00009:
	case 0x5f00000 ... 0x5f00030:
	case 0x6100000 ... 0x6100068:
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_USER_FU05, RT712_SDCA_CTL_FU_VOLUME, CH_L):
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_USER_FU05, RT712_SDCA_CTL_FU_VOLUME, CH_R):
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_USER_FU0F, RT712_SDCA_CTL_FU_VOLUME, CH_L):
	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT712_SDCA_ENT_USER_FU0F, RT712_SDCA_CTL_FU_VOLUME, CH_R):
	case SDW_SDCA_CTL(FUNC_NUM_AMP, RT712_SDCA_ENT_USER_FU06, RT712_SDCA_CTL_FU_VOLUME, CH_L):
	case SDW_SDCA_CTL(FUNC_NUM_AMP, RT712_SDCA_ENT_USER_FU06, RT712_SDCA_CTL_FU_VOLUME, CH_R):
		return true;
	default:
		return false;
	}
}

static bool rt712_sdca_mbq_volatile_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x2000000:
	case 0x200001a:
	case 0x2000024:
	case 0x2000046:
	case 0x200008a:
	case 0x5800000:
	case 0x5800001:
	case 0x6100008:
		return true;
	default:
		return false;
	}
}

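/*
 * Two regmaps share the same SoundWire peripheral: a byte-wide map for
 * the standard SDW/SDCA registers and a 16-bit "sdw-mbq" map for the
 * MBQ registers backing the volume/gain controls.
 */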
static const struct regmap_config rt712_sdca_regmap = {
	.reg_bits = 32,
	.val_bits = 8,
	.readable_reg = rt712_sdca_readable_register,
	.volatile_reg = rt712_sdca_volatile_register,
	.max_register = 0x44ffffff,
	.reg_defaults = rt712_sdca_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(rt712_sdca_reg_defaults),
	.cache_type = REGCACHE_MAPLE,
	.use_single_read = true,
	.use_single_write = true,
};

static const struct regmap_config rt712_sdca_mbq_regmap = {
	.name = "sdw-mbq",
	.reg_bits = 32,
	.val_bits = 16,
	.readable_reg = rt712_sdca_mbq_readable_register,
	.volatile_reg = rt712_sdca_mbq_volatile_register,
	.max_register = 0x41000312,
	.reg_defaults = rt712_sdca_mbq_defaults,
	.num_reg_defaults = ARRAY_SIZE(rt712_sdca_mbq_defaults),
	.cache_type = REGCACHE_MAPLE,
	.use_single_read = true,
	.use_single_write = true,
};

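/*
 * Bus status callback: clear the hw_init flag on unattach, restore the
 * SDCA interrupt masks on re-attach when jack detection is in use, and
 * run rt712_sdca_io_init once the peripheral reports attached and has
 * not been initialized yet.
 */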
static int rt712_sdca_update_status(struct sdw_slave *slave,
			enum sdw_slave_status status)
{
	struct rt712_sdca_priv *rt712 = dev_get_drvdata(&slave->dev);

	if (status == SDW_SLAVE_UNATTACHED)
		rt712->hw_init = false;

	if (status == SDW_SLAVE_ATTACHED) {
		if (rt712->hs_jack) {
			/*
			 * SCP_SDCA_INTMASK is cleared by any reset, so when the
			 * device attaches again the mask must be restored to
			 * avoid losing the jack detection interrupt. This also
			 * keeps the hardware in sync with the cached value set
			 * by rt712_sdca_jack_init.
			 */
			sdw_write_no_pm(rt712->slave, SDW_SCP_SDCA_INTMASK1,
				SDW_SCP_SDCA_INTMASK_SDCA_0);
			sdw_write_no_pm(rt712->slave, SDW_SCP_SDCA_INTMASK2,
				SDW_SCP_SDCA_INTMASK_SDCA_8);
		}
	}

	/*
	 * Perform initialization only if slave status is present and
	 * hw_init flag is false
	 */
	if (rt712->hw_init || status != SDW_SLAVE_ATTACHED)
		return 0;

	/* perform I/O transfers required for Slave initialization */
	return rt712_sdca_io_init(&slave->dev, slave);
}

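/*
 * Describe the peripheral to the SoundWire core: interrupt masks, paging
 * support, source/sink data port bitmaps with their DPn properties, and
 * the clock-stop and wake-up capabilities.
 */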
static int rt712_sdca_read_prop(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;
	int nval;
	int i, j;
	u32 bit;
	unsigned long addr;
	struct sdw_dpn_prop *dpn;

	prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
	prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;

	prop->paging_support = true;

	/* first we need to allocate memory for set bits in port lists */
	prop->source_ports = BIT(4); /* BITMAP: 00010000 */
	prop->sink_ports = BIT(3) | BIT(1); /* BITMAP: 00001010 */

	nval = hweight32(prop->source_ports);
	prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
		sizeof(*prop->src_dpn_prop), GFP_KERNEL);
	if (!prop->src_dpn_prop)
		return -ENOMEM;

	i = 0;
	dpn = prop->src_dpn_prop;
	addr = prop->source_ports;
	for_each_set_bit(bit, &addr, 32) {
		dpn[i].num = bit;
		dpn[i].type = SDW_DPN_FULL;
		dpn[i].simple_ch_prep_sm = true;
		dpn[i].ch_prep_timeout = 10;
		i++;
	}

	/* do this again for sink now */
	nval = hweight32(prop->sink_ports);
	prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
		sizeof(*prop->sink_dpn_prop), GFP_KERNEL);
	if (!prop->sink_dpn_prop)
		return -ENOMEM;

	j = 0;
	dpn = prop->sink_dpn_prop;
	addr = prop->sink_ports;
	for_each_set_bit(bit, &addr, 32) {
		dpn[j].num = bit;
		dpn[j].type = SDW_DPN_FULL;
		dpn[j].simple_ch_prep_sm = true;
		dpn[j].ch_prep_timeout = 10;
		j++;
	}

	/* set the timeout values */
	prop->clk_stop_timeout = 1380;

	/* wake-up event */
	prop->wake_capable = 1;

	return 0;
}

static int rt712_sdca_interrupt_callback(struct sdw_slave *slave,
					struct sdw_slave_intr_status *status)
{
	struct rt712_sdca_priv *rt712 = dev_get_drvdata(&slave->dev);
	int ret, stat;
	int count = 0, retry = 3;
	unsigned int sdca_cascade, scp_sdca_stat1, scp_sdca_stat2 = 0;

	dev_dbg(&slave->dev,
		"%s control_port_stat=%x, sdca_cascade=%x", __func__,
		status->control_port, status->sdca_cascade);

	if (cancel_delayed_work_sync(&rt712->jack_detect_work)) {
		dev_warn(&slave->dev, "%s the pending delayed_work was cancelled", __func__);
		/* preserve the pending event so the HID owner still changes back to device */
		if (rt712->scp_sdca_stat2)
			scp_sdca_stat2 = rt712->scp_sdca_stat2;
	}

	/*
	 * The critical section below intentionally protects a rather large piece of code.
	 * We don't want to allow the system suspend to disable an interrupt while we are
	 * processing it, which could be problematic given the quirky SoundWire interrupt
	 * scheme. We do want however to prevent new workqueues from being scheduled if
	 * the disable_irq flag was set during system suspend.
	 */
	mutex_lock(&rt712->disable_irq_lock);

	ret = sdw_read_no_pm(rt712->slave, SDW_SCP_SDCA_INT1);
	if (ret < 0)
		goto io_error;
	rt712->scp_sdca_stat1 = ret;
	ret = sdw_read_no_pm(rt712->slave, SDW_SCP_SDCA_INT2);
	if (ret < 0)
		goto io_error;
	rt712->scp_sdca_stat2 = ret;
	if (scp_sdca_stat2)
		rt712->scp_sdca_stat2 |= scp_sdca_stat2;

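	/*
	 * Write-to-clear the SDCA interrupt status bits, then read DP0 and
	 * both status registers back to confirm they really cleared; retry
	 * a few times if the bits are still set.
	 */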
	do {
		/* clear flag */
		ret = sdw_read_no_pm(rt712->slave, SDW_SCP_SDCA_INT1);
		if (ret < 0)
			goto io_error;
		if (ret & SDW_SCP_SDCA_INTMASK_SDCA_0) {
			ret = sdw_write_no_pm(rt712->slave, SDW_SCP_SDCA_INT1,
						SDW_SCP_SDCA_INTMASK_SDCA_0);
			if (ret < 0)
				goto io_error;
		}
		ret = sdw_read_no_pm(rt712->slave, SDW_SCP_SDCA_INT2);
		if (ret < 0)
			goto io_error;
		if (ret & SDW_SCP_SDCA_INTMASK_SDCA_8) {
			ret = sdw_write_no_pm(rt712->slave, SDW_SCP_SDCA_INT2,
						SDW_SCP_SDCA_INTMASK_SDCA_8);
			if (ret < 0)
				goto io_error;
		}

		/* check if flag clear or not */
		ret = sdw_read_no_pm(rt712->slave, SDW_DP0_INT);
		if (ret < 0)
			goto io_error;
		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;

		ret = sdw_read_no_pm(rt712->slave, SDW_SCP_SDCA_INT1);
		if (ret < 0)
			goto io_error;
		scp_sdca_stat1 = ret & SDW_SCP_SDCA_INTMASK_SDCA_0;

		ret = sdw_read_no_pm(rt712->slave, SDW_SCP_SDCA_INT2);
		if (ret < 0)
			goto io_error;
		scp_sdca_stat2 = ret & SDW_SCP_SDCA_INTMASK_SDCA_8;

		stat = scp_sdca_stat1 || scp_sdca_stat2 || sdca_cascade;

		count++;
	} while (stat != 0 && count < retry);

	if (stat)
		dev_warn(&slave->dev,
			"%s scp_sdca_stat1=0x%x, scp_sdca_stat2=0x%x\n", __func__,
			rt712->scp_sdca_stat1, rt712->scp_sdca_stat2);

	if (status->sdca_cascade && !rt712->disable_irq)
		mod_delayed_work(system_power_efficient_wq,
			&rt712->jack_detect_work, msecs_to_jiffies(30));

	mutex_unlock(&rt712->disable_irq_lock);

	return 0;

io_error:
	mutex_unlock(&rt712->disable_irq_lock);
	pr_err_ratelimited("IO error in %s, ret %d\n", __func__, ret);
	return ret;
}

static struct sdw_slave_ops rt712_sdca_slave_ops = {
	.read_prop = rt712_sdca_read_prop,
	.interrupt_callback = rt712_sdca_interrupt_callback,
	.update_status = rt712_sdca_update_status,
};

static int rt712_sdca_sdw_probe(struct sdw_slave *slave,
				const struct sdw_device_id *id)
{
	struct regmap *regmap, *mbq_regmap;

	/* Regmap Initialization */
	mbq_regmap = devm_regmap_init_sdw_mbq(slave, &rt712_sdca_mbq_regmap);
	if (IS_ERR(mbq_regmap))
		return PTR_ERR(mbq_regmap);

	regmap = devm_regmap_init_sdw(slave, &rt712_sdca_regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return rt712_sdca_init(&slave->dev, regmap, mbq_regmap, slave);
}

static int rt712_sdca_sdw_remove(struct sdw_slave *slave)
{
	struct rt712_sdca_priv *rt712 = dev_get_drvdata(&slave->dev);

	if (rt712->hw_init) {
		cancel_delayed_work_sync(&rt712->jack_detect_work);
		cancel_delayed_work_sync(&rt712->jack_btn_check_work);
	}

	pm_runtime_disable(&slave->dev);

	mutex_destroy(&rt712->calibrate_mutex);
	mutex_destroy(&rt712->disable_irq_lock);

	return 0;
}

static const struct sdw_device_id rt712_sdca_id[] = {
	SDW_SLAVE_ENTRY_EXT(0x025d, 0x712, 0x3, 0x1, 0),
	SDW_SLAVE_ENTRY_EXT(0x025d, 0x713, 0x3, 0x1, 0),
	SDW_SLAVE_ENTRY_EXT(0x025d, 0x716, 0x3, 0x1, 0),
	SDW_SLAVE_ENTRY_EXT(0x025d, 0x717, 0x3, 0x1, 0),
	{},
};
MODULE_DEVICE_TABLE(sdw, rt712_sdca_id);

static int __maybe_unused rt712_sdca_dev_suspend(struct device *dev)
{
	struct rt712_sdca_priv *rt712 = dev_get_drvdata(dev);

	if (!rt712->hw_init)
		return 0;

	cancel_delayed_work_sync(&rt712->jack_detect_work);
	cancel_delayed_work_sync(&rt712->jack_btn_check_work);

	regcache_cache_only(rt712->regmap, true);
	regcache_cache_only(rt712->mbq_regmap, true);

	return 0;
}

static int __maybe_unused rt712_sdca_dev_system_suspend(struct device *dev)
{
	struct rt712_sdca_priv *rt712_sdca = dev_get_drvdata(dev);
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	int ret1, ret2;

	if (!rt712_sdca->hw_init)
		return 0;

	/*
	 * prevent new interrupts from being handled after the
	 * deferred work completes and before the parent disables
	 * interrupts on the link
	 */
	mutex_lock(&rt712_sdca->disable_irq_lock);
	rt712_sdca->disable_irq = true;
	ret1 = sdw_update_no_pm(slave, SDW_SCP_SDCA_INTMASK1,
				SDW_SCP_SDCA_INTMASK_SDCA_0, 0);
	ret2 = sdw_update_no_pm(slave, SDW_SCP_SDCA_INTMASK2,
				SDW_SCP_SDCA_INTMASK_SDCA_8, 0);
	mutex_unlock(&rt712_sdca->disable_irq_lock);

	if (ret1 < 0 || ret2 < 0) {
		/* log but don't prevent suspend from happening */
		dev_dbg(&slave->dev, "%s: could not disable SDCA interrupts\n", __func__);
	}

	return rt712_sdca_dev_suspend(dev);
}

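/* timeout (ms) to wait for the peripheral to re-enumerate on resume */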
#define RT712_PROBE_TIMEOUT 5000

static int __maybe_unused rt712_sdca_dev_resume(struct device *dev)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct rt712_sdca_priv *rt712 = dev_get_drvdata(dev);
	unsigned long time;

	if (!rt712->first_hw_init)
		return 0;

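	/*
	 * If the link was not reset while suspended, re-enable the SDCA
	 * interrupts masked at system suspend and sync the register caches;
	 * otherwise wait for the peripheral to complete re-enumeration first.
	 */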
	if (!slave->unattach_request) {
		mutex_lock(&rt712->disable_irq_lock);
		if (rt712->disable_irq == true) {
			sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
			sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
			rt712->disable_irq = false;
		}
		mutex_unlock(&rt712->disable_irq_lock);
		goto regmap_sync;
	}

	time = wait_for_completion_timeout(&slave->initialization_complete,
				msecs_to_jiffies(RT712_PROBE_TIMEOUT));
	if (!time) {
		dev_err(&slave->dev, "Initialization not complete, timed out\n");
		sdw_show_ping_status(slave->bus, true);

		return -ETIMEDOUT;
	}

regmap_sync:
	slave->unattach_request = 0;
	regcache_cache_only(rt712->regmap, false);
	regcache_sync(rt712->regmap);
	regcache_cache_only(rt712->mbq_regmap, false);
	regcache_sync(rt712->mbq_regmap);
	return 0;
}

static const struct dev_pm_ops rt712_sdca_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rt712_sdca_dev_system_suspend, rt712_sdca_dev_resume)
	SET_RUNTIME_PM_OPS(rt712_sdca_dev_suspend, rt712_sdca_dev_resume, NULL)
};

static struct sdw_driver rt712_sdca_sdw_driver = {
	.driver = {
		.name = "rt712-sdca",
		.owner = THIS_MODULE,
		.pm = &rt712_sdca_pm,
	},
	.probe = rt712_sdca_sdw_probe,
	.remove = rt712_sdca_sdw_remove,
	.ops = &rt712_sdca_slave_ops,
	.id_table = rt712_sdca_id,
};
module_sdw_driver(rt712_sdca_sdw_driver);

MODULE_DESCRIPTION("ASoC RT712 SDCA SDW driver");
MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
MODULE_LICENSE("GPL");