1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * SoundWire AMD Manager driver
4 *
5 * Copyright 2023 Advanced Micro Devices, Inc.
6 */
7
8 #include <linux/completion.h>
9 #include <linux/device.h>
10 #include <linux/io.h>
11 #include <linux/jiffies.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/soundwire/sdw.h>
16 #include <linux/soundwire/sdw_registers.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/wait.h>
19 #include <sound/pcm_params.h>
20 #include <sound/soc.h>
21 #include "bus.h"
22 #include "amd_manager.h"
23
24 #define DRV_NAME "amd_sdw_manager"
25
26 #define to_amd_sdw(b) container_of(b, struct amd_sdw_manager, bus)
27
amd_enable_sdw_pads(struct amd_sdw_manager * amd_manager)28 static void amd_enable_sdw_pads(struct amd_sdw_manager *amd_manager)
29 {
30 u32 sw_pad_pulldown_val;
31 u32 val;
32
33 mutex_lock(amd_manager->acp_sdw_lock);
34 val = readl(amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
35 val |= amd_manager->reg_mask->sw_pad_enable_mask;
36 writel(val, amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
37 usleep_range(1000, 1500);
38
39 sw_pad_pulldown_val = readl(amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
40 sw_pad_pulldown_val &= amd_manager->reg_mask->sw_pad_pulldown_mask;
41 writel(sw_pad_pulldown_val, amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
42 mutex_unlock(amd_manager->acp_sdw_lock);
43 }
44
/*
 * Bring the SoundWire manager to a known state: enable it, issue a bus
 * reset, clear the reset request, then disable the manager again (it is
 * re-enabled later via amd_enable_sdw_manager()).
 * Returns 0 on success or a negative error code on poll timeout.
 */
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	/* wait until the hardware reports the manager as enabled */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				 AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* SoundWire manager bus reset */
	writel(AMD_SDW_BUS_RESET_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	/* wait for the controller to acknowledge completion of the reset */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val,
				 (val & AMD_SDW_BUS_RESET_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* clear the reset request and wait for the control register to go idle */
	writel(AMD_SDW_BUS_RESET_CLEAR_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val, !val,
				 ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "Failed to reset SoundWire manager instance%d\n",
			amd_manager->instance);
		return ret;
	}

	/* leave the manager disabled; probe work enables it after IRQ setup */
	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
76
amd_enable_sdw_manager(struct amd_sdw_manager * amd_manager)77 static int amd_enable_sdw_manager(struct amd_sdw_manager *amd_manager)
78 {
79 u32 val;
80
81 writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
82 return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
83 AMD_SDW_TIMEOUT);
84 }
85
/*
 * Disable the manager and wait for the enable status to clear, unless
 * the hardware has already executed a clock stop sequence.
 * Returns 0 on success or a negative error code on poll timeout.
 */
static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	/*
	 * After invoking manager disable sequence, check whether
	 * manager has executed clock stop sequence. In this case,
	 * manager should ignore checking enable status register.
	 */
	val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
	if (val)
		return 0;
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
102
amd_enable_sdw_interrupts(struct amd_sdw_manager * amd_manager)103 static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
104 {
105 struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
106 u32 val;
107
108 mutex_lock(amd_manager->acp_sdw_lock);
109 val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
110 val |= reg_mask->acp_sdw_intr_mask;
111 writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
112 mutex_unlock(amd_manager->acp_sdw_lock);
113
114 writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
115 ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
116 writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
117 ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
118 writel(AMD_SDW_IRQ_ERROR_MASK, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
119 }
120
amd_disable_sdw_interrupts(struct amd_sdw_manager * amd_manager)121 static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
122 {
123 struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
124 u32 val;
125
126 mutex_lock(amd_manager->acp_sdw_lock);
127 val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
128 val &= ~reg_mask->acp_sdw_intr_mask;
129 writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
130 mutex_unlock(amd_manager->acp_sdw_lock);
131
132 writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
133 writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
134 writel(0x00, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
135 }
136
/* Tear down the manager: mask all interrupt sources, then disable it. */
static int amd_deinit_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	amd_disable_sdw_interrupts(amd_manager);
	return amd_disable_sdw_manager(amd_manager);
}
142
amd_sdw_set_frameshape(struct amd_sdw_manager * amd_manager)143 static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
144 {
145 u32 frame_size;
146
147 frame_size = (amd_manager->rows_index << 3) | amd_manager->cols_index;
148 writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
149 }
150
/* Set or clear the wake interrupt mask bit in the 8TO11 status mask register. */
static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
{
	u32 intr_mask;

	intr_mask = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	if (enable)
		intr_mask |= AMD_SDW_WAKE_INTR_MASK;
	else
		intr_mask &= ~AMD_SDW_WAKE_INTR_MASK;
	writel(intr_mask, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
}
163
/*
 * Build the immediate command word (as two 32-bit halves) for one byte of
 * an sdw_msg transfer at @cmd_offset. For writes the data byte is taken
 * from msg->buf[cmd_offset]; for reads the data field stays zero.
 */
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
				  int cmd_offset)
{
	u32 upper = 0;
	u32 lower = 0;
	u16 reg_addr;
	u8 wdata = 0;

	reg_addr = msg->addr + cmd_offset;
	if (msg->flags == SDW_MSG_FLAG_WRITE)
		wdata = msg->buf[cmd_offset];

	upper |= FIELD_PREP(AMD_SDW_MCP_CMD_DEV_ADDR, msg->dev_num);
	/* msg->flags + 2 maps the read/write flag onto the MCP command opcode */
	upper |= FIELD_PREP(AMD_SDW_MCP_CMD_COMMAND, msg->flags + 2);
	upper |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_HIGH, (reg_addr >> 8) & 0xFF);
	lower |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_LOW, reg_addr & 0xFF);
	lower |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_DATA, wdata);

	*upper_word = upper;
	*lower_word = lower;
}
189
/*
 * Issue one immediate command to the manager and return the 64-bit
 * response (upper response word in bits 63:32, lower in bits 31:0).
 *
 * On poll timeout a negative errno is returned; because the return type
 * is u64, callers detect failure by comparing against -ETIMEDOUT.
 */
static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lower_data,
				     u32 upper_data)
{
	u64 resp;
	u32 lower_resp, upper_resp;
	u32 sts;
	int ret;

	/* wait for any previously issued immediate command to finish */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_CMD_BUSY), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x previous cmd status clear failed\n",
			amd_manager->instance);
		return ret;
	}

	/* a stale valid-response flag means the manager is out of sync; clear it */
	if (sts & AMD_SDW_IMM_RES_VALID) {
		dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
		writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	}
	writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
	writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);

	/* wait for the command response to become valid */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 (sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd response timeout occurred\n",
			amd_manager->instance);
		return ret;
	}
	upper_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_UPPER_WORD);
	lower_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_LOWER_QWORD);

	/* acknowledge the response and wait for the valid flag to clear */
	writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd status retry failed\n",
			amd_manager->instance);
		return ret;
	}
	resp = upper_resp;
	resp = (resp << 32) | lower_resp;
	return resp;
}
235
/*
 * Program SCP_AddrPage1/2 on the peripheral for paged register access.
 * Both page writes are issued back to back and their responses checked
 * afterwards, so a timeout/NACK/ignored response on either write is
 * reported to the caller.
 */
static enum sdw_command_response
amd_program_scp_addr(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
	struct sdw_msg scp_msg = {0};
	u64 response_buf[2] = {0};
	u32 upper_data = 0, lower_data = 0;
	int index;

	scp_msg.dev_num = msg->dev_num;
	scp_msg.addr = SDW_SCP_ADDRPAGE1;
	scp_msg.buf = &msg->addr_page1;
	scp_msg.flags = SDW_MSG_FLAG_WRITE;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[0] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
	/* reuse the message for the second address page */
	scp_msg.addr = SDW_SCP_ADDRPAGE2;
	scp_msg.buf = &msg->addr_page2;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[1] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);

	for (index = 0; index < 2; index++) {
		/* a poll-timeout errno from send_cmd_get_resp shows up as -ETIMEDOUT */
		if (response_buf[index] == -ETIMEDOUT) {
			dev_err_ratelimited(amd_manager->dev,
					    "SCP_addrpage command timeout for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_TIMEOUT;
		} else if (!(response_buf[index] & AMD_SDW_MCP_RESP_ACK)) {
			if (response_buf[index] & AMD_SDW_MCP_RESP_NACK) {
				dev_err_ratelimited(amd_manager->dev,
						    "SCP_addrpage NACKed for Slave %d\n",
						    msg->dev_num);
				return SDW_CMD_FAIL;
			}
			/* neither ACK nor NACK: peripheral ignored the command */
			dev_dbg_ratelimited(amd_manager->dev, "SCP_addrpage ignored for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_IGNORED;
		}
	}
	return SDW_CMD_OK;
}
275
amd_prep_msg(struct amd_sdw_manager * amd_manager,struct sdw_msg * msg)276 static int amd_prep_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
277 {
278 int ret;
279
280 if (msg->page) {
281 ret = amd_program_scp_addr(amd_manager, msg);
282 if (ret) {
283 msg->len = 0;
284 return ret;
285 }
286 }
287 switch (msg->flags) {
288 case SDW_MSG_FLAG_READ:
289 case SDW_MSG_FLAG_WRITE:
290 break;
291 default:
292 dev_err(amd_manager->dev, "Invalid msg cmd: %d\n", msg->flags);
293 return -EINVAL;
294 }
295 return 0;
296 }
297
/*
 * Decode a command response: on ACK, store read data into the message
 * buffer at @offset; otherwise map timeout/NACK/no-response onto the
 * corresponding sdw_command_response code.
 */
static enum sdw_command_response amd_sdw_fill_msg_resp(struct amd_sdw_manager *amd_manager,
						       struct sdw_msg *msg, u64 response,
						       int offset)
{
	if (response & AMD_SDW_MCP_RESP_ACK) {
		if (msg->flags == SDW_MSG_FLAG_READ)
			msg->buf[offset] = FIELD_GET(AMD_SDW_MCP_RESP_RDATA, response);
	} else {
		/* a poll-timeout errno from send_cmd_get_resp shows up as -ETIMEDOUT */
		if (response == -ETIMEDOUT) {
			dev_err_ratelimited(amd_manager->dev, "command timeout for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_TIMEOUT;
		} else if (response & AMD_SDW_MCP_RESP_NACK) {
			dev_err_ratelimited(amd_manager->dev,
					    "command response NACK received for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_FAIL;
		}
		/* neither ACK nor NACK: peripheral ignored the command */
		dev_err_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
				    msg->dev_num);
		return SDW_CMD_IGNORED;
	}
	return SDW_CMD_OK;
}
322
_amd_sdw_xfer_msg(struct amd_sdw_manager * amd_manager,struct sdw_msg * msg,int cmd_offset)323 static unsigned int _amd_sdw_xfer_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg,
324 int cmd_offset)
325 {
326 u64 response;
327 u32 upper_data = 0, lower_data = 0;
328
329 amd_sdw_ctl_word_prep(&lower_data, &upper_data, msg, cmd_offset);
330 response = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
331 return amd_sdw_fill_msg_resp(amd_manager, msg, response, cmd_offset);
332 }
333
amd_sdw_xfer_msg(struct sdw_bus * bus,struct sdw_msg * msg)334 static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
335 {
336 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
337 int ret, i;
338
339 ret = amd_prep_msg(amd_manager, msg);
340 if (ret)
341 return SDW_CMD_FAIL_OTHER;
342 for (i = 0; i < msg->len; i++) {
343 ret = _amd_sdw_xfer_msg(amd_manager, msg, i);
344 if (ret)
345 return ret;
346 }
347 return SDW_CMD_OK;
348 }
349
/*
 * Record a peripheral status value; anything other than the three known
 * states (attached/unattached/alert) is stored as SDW_SLAVE_RESERVED.
 */
static void amd_sdw_fill_slave_status(struct amd_sdw_manager *amd_manager, u16 index, u32 status)
{
	bool known = (status == SDW_SLAVE_ATTACHED ||
		      status == SDW_SLAVE_UNATTACHED ||
		      status == SDW_SLAVE_ALERT);

	amd_manager->status[index] = known ? status : SDW_SLAVE_RESERVED;
}
363
/* Decode the 2-bit-per-device status field from a PING command response. */
static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
{
	u64 dev_status;
	u16 id;

	/* slave status response: merge the two response groups into one field */
	dev_status = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
	dev_status |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
	dev_dbg(amd_manager->dev, "slave_stat:0x%llx\n", dev_status);
	for (id = 0; id <= SDW_MAX_DEVICES; ++id) {
		u32 stat = (dev_status >> (id * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;

		dev_dbg(amd_manager->dev, "val:0x%x\n", stat);
		amd_sdw_fill_slave_status(amd_manager, id, stat);
	}
}
380
amd_sdw_read_and_process_ping_status(struct amd_sdw_manager * amd_manager)381 static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
382 {
383 u64 response;
384
385 mutex_lock(&amd_manager->bus.msg_lock);
386 response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
387 mutex_unlock(&amd_manager->bus.msg_lock);
388 amd_sdw_process_ping_status(response, amd_manager);
389 }
390
amd_sdw_read_ping_status(struct sdw_bus * bus)391 static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
392 {
393 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
394 u64 response;
395 u32 slave_stat;
396
397 response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
398 /* slave status from ping response */
399 slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
400 slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
401 dev_dbg(amd_manager->dev, "slave_stat:0x%x\n", slave_stat);
402 return slave_stat;
403 }
404
/*
 * Compute transport and port parameters for every master runtime on the
 * bus. Each port gets a fixed block offset of (port_num * 64) + 1 and a
 * horizontal span of column 1 through (bus columns - 1).
 */
static int amd_sdw_compute_params(struct sdw_bus *bus)
{
	struct sdw_transport_data t_data = {0};
	struct sdw_master_runtime *m_rt;
	struct sdw_port_runtime *p_rt;
	struct sdw_bus_params *b_params = &bus->params;
	int port_bo, hstart, hstop, sample_int;
	unsigned int rate, bps;

	port_bo = 0;
	hstart = 1;
	hstop = bus->params.col - 1;
	t_data.hstop = hstop;
	t_data.hstart = hstart;

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		rate = m_rt->stream->params.rate;
		bps = m_rt->stream->params.bps;
		/* samples per frame interval relative to the current bus clock */
		sample_int = (bus->params.curr_dr_freq / rate);
		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
			/* fixed per-port block offset derived from the port number */
			port_bo = (p_rt->num * 64) + 1;
			dev_dbg(bus->dev, "p_rt->num=%d hstart=%d hstop=%d port_bo=%d\n",
				p_rt->num, hstart, hstop, port_bo);
			sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
					      false, SDW_BLK_GRP_CNT_1, sample_int,
					      port_bo, port_bo >> 8, hstart, hstop,
					      SDW_BLK_PKG_PER_PORT, 0x0);

			sdw_fill_port_params(&p_rt->port_params,
					     p_rt->num, bps,
					     SDW_PORT_FLOW_MODE_ISOCH,
					     b_params->m_data_mode);
			/*
			 * NOTE(review): t_data retains only the values of the
			 * last port in the list when passed to
			 * sdw_compute_slave_ports() below — confirm intended.
			 */
			t_data.hstart = hstart;
			t_data.hstop = hstop;
			t_data.block_offset = port_bo;
			t_data.sub_block_offset = 0;
		}
		sdw_compute_slave_ports(m_rt, &t_data);
	}
	return 0;
}
446
amd_sdw_port_params(struct sdw_bus * bus,struct sdw_port_params * p_params,unsigned int bank)447 static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_params,
448 unsigned int bank)
449 {
450 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
451 u32 frame_fmt_reg, dpn_frame_fmt;
452
453 dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num);
454 switch (amd_manager->instance) {
455 case ACP_SDW0:
456 frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg;
457 break;
458 case ACP_SDW1:
459 frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg;
460 break;
461 default:
462 return -EINVAL;
463 }
464
465 dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
466 u32p_replace_bits(&dpn_frame_fmt, p_params->flow_mode, AMD_DPN_FRAME_FMT_PFM);
467 u32p_replace_bits(&dpn_frame_fmt, p_params->data_mode, AMD_DPN_FRAME_FMT_PDM);
468 u32p_replace_bits(&dpn_frame_fmt, p_params->bps - 1, AMD_DPN_FRAME_FMT_WORD_LEN);
469 writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);
470 return 0;
471 }
472
/*
 * sdw_master_port_ops.dpn_set_port_transport_params: program block
 * packing/grouping, sample interval, horizontal start/stop, offsets and
 * lane control for the given port.
 * NOTE(review): the @bank argument is not used — single-bank programming
 * appears to be assumed; confirm against the hardware programming model.
 */
static int amd_sdw_transport_params(struct sdw_bus *bus,
				    struct sdw_transport_params *params,
				    enum sdw_reg_bank bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 dpn_frame_fmt;
	u32 dpn_sampleinterval;
	u32 dpn_hctrl;
	u32 dpn_offsetctrl;
	u32 dpn_lanectrl;
	u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg;
	u32 offset_reg, lane_ctrl_ch_en_reg;

	/* pick the per-port register table matching this manager instance */
	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	default:
		return -EINVAL;
	}
	writel(AMD_SDW_SSP_COUNTER_VAL, amd_manager->mmio + ACP_SW_SSP_COUNTER);

	/* block packing mode, block group control and PCM stream type */
	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_pkg_mode, AMD_DPN_FRAME_FMT_BLK_PKG_MODE);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_grp_ctrl, AMD_DPN_FRAME_FMT_BLK_GRP_CTRL);
	u32p_replace_bits(&dpn_frame_fmt, SDW_STREAM_PCM, AMD_DPN_FRAME_FMT_PCM_OR_PDM);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);

	/* hardware takes the sample interval minus one */
	dpn_sampleinterval = params->sample_interval - 1;
	writel(dpn_sampleinterval, amd_manager->mmio + sample_int_reg);

	dpn_hctrl = FIELD_PREP(AMD_DPN_HCTRL_HSTOP, params->hstop);
	dpn_hctrl |= FIELD_PREP(AMD_DPN_HCTRL_HSTART, params->hstart);
	writel(dpn_hctrl, amd_manager->mmio + hctrl_dp0_reg);

	dpn_offsetctrl = FIELD_PREP(AMD_DPN_OFFSET_CTRL_1, params->offset1);
	dpn_offsetctrl |= FIELD_PREP(AMD_DPN_OFFSET_CTRL_2, params->offset2);
	writel(dpn_offsetctrl, amd_manager->mmio + offset_reg);

	/*
	 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
	 * parameters.
	 */
	dpn_lanectrl = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
	u32p_replace_bits(&dpn_lanectrl, params->lane_ctrl, AMD_DPN_CH_EN_LCTRL);
	writel(dpn_lanectrl, amd_manager->mmio + lane_ctrl_ch_en_reg);
	return 0;
}
532
amd_sdw_port_enable(struct sdw_bus * bus,struct sdw_enable_ch * enable_ch,unsigned int bank)533 static int amd_sdw_port_enable(struct sdw_bus *bus,
534 struct sdw_enable_ch *enable_ch,
535 unsigned int bank)
536 {
537 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
538 u32 dpn_ch_enable;
539 u32 lane_ctrl_ch_en_reg;
540
541 switch (amd_manager->instance) {
542 case ACP_SDW0:
543 lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
544 break;
545 case ACP_SDW1:
546 lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
547 break;
548 default:
549 return -EINVAL;
550 }
551
552 /*
553 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
554 * parameters.
555 */
556 dpn_ch_enable = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
557 u32p_replace_bits(&dpn_ch_enable, enable_ch->ch_mask, AMD_DPN_CH_EN_CHMASK);
558 if (enable_ch->enable)
559 writel(dpn_ch_enable, amd_manager->mmio + lane_ctrl_ch_en_reg);
560 else
561 writel(0, amd_manager->mmio + lane_ctrl_ch_en_reg);
562 return 0;
563 }
564
sdw_master_read_amd_prop(struct sdw_bus * bus)565 static int sdw_master_read_amd_prop(struct sdw_bus *bus)
566 {
567 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
568 struct fwnode_handle *link;
569 struct sdw_master_prop *prop;
570 u32 quirk_mask = 0;
571 u32 wake_en_mask = 0;
572 u32 power_mode_mask = 0;
573 char name[32];
574
575 prop = &bus->prop;
576 /* Find manager handle */
577 snprintf(name, sizeof(name), "mipi-sdw-link-%d-subproperties", bus->link_id);
578 link = device_get_named_child_node(bus->dev, name);
579 if (!link) {
580 dev_err(bus->dev, "Manager node %s not found\n", name);
581 return -EIO;
582 }
583 fwnode_property_read_u32(link, "amd-sdw-enable", &quirk_mask);
584 if (!(quirk_mask & AMD_SDW_QUIRK_MASK_BUS_ENABLE))
585 prop->hw_disabled = true;
586 prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
587 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
588
589 fwnode_property_read_u32(link, "amd-sdw-wakeup-enable", &wake_en_mask);
590 amd_manager->wake_en_mask = wake_en_mask;
591 fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
592 amd_manager->power_mode_mask = power_mode_mask;
593 return 0;
594 }
595
/*
 * sdw_master_ops.read_prop: read the standard MIPI DisCo manager
 * properties, then the AMD-specific ones. Propagate a failure from the
 * AMD property read (e.g. missing firmware node) instead of silently
 * discarding it.
 */
static int amd_prop_read(struct sdw_bus *bus)
{
	sdw_master_read_prop(bus);
	return sdw_master_read_amd_prop(bus);
}
602
/* Data-port ops plugged into the SoundWire core for DPn programming. */
static const struct sdw_master_port_ops amd_sdw_port_ops = {
	.dpn_set_port_params = amd_sdw_port_params,
	.dpn_set_port_transport_params = amd_sdw_transport_params,
	.dpn_port_enable_ch = amd_sdw_port_enable,
};
608
/* Manager ops: property read, message transfer and PING status readback. */
static const struct sdw_master_ops amd_sdw_ops = {
	.read_prop = amd_prop_read,
	.xfer_msg = amd_sdw_xfer_msg,
	.read_ping_status = amd_sdw_read_ping_status,
};
614
amd_sdw_hw_params(struct snd_pcm_substream * substream,struct snd_pcm_hw_params * params,struct snd_soc_dai * dai)615 static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
616 struct snd_pcm_hw_params *params,
617 struct snd_soc_dai *dai)
618 {
619 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
620 struct sdw_amd_dai_runtime *dai_runtime;
621 struct sdw_stream_config sconfig;
622 struct sdw_port_config *pconfig;
623 int ch, dir;
624 int ret;
625
626 dai_runtime = amd_manager->dai_runtime_array[dai->id];
627 if (!dai_runtime)
628 return -EIO;
629
630 ch = params_channels(params);
631 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
632 dir = SDW_DATA_DIR_RX;
633 else
634 dir = SDW_DATA_DIR_TX;
635 dev_dbg(amd_manager->dev, "dir:%d dai->id:0x%x\n", dir, dai->id);
636
637 sconfig.direction = dir;
638 sconfig.ch_count = ch;
639 sconfig.frame_rate = params_rate(params);
640 sconfig.type = dai_runtime->stream_type;
641
642 sconfig.bps = snd_pcm_format_width(params_format(params));
643
644 /* Port configuration */
645 pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
646 if (!pconfig) {
647 ret = -ENOMEM;
648 goto error;
649 }
650
651 pconfig->num = dai->id;
652 pconfig->ch_mask = (1 << ch) - 1;
653 ret = sdw_stream_add_master(&amd_manager->bus, &sconfig,
654 pconfig, 1, dai_runtime->stream);
655 if (ret)
656 dev_err(amd_manager->dev, "add manager to stream failed:%d\n", ret);
657
658 kfree(pconfig);
659 error:
660 return ret;
661 }
662
amd_sdw_hw_free(struct snd_pcm_substream * substream,struct snd_soc_dai * dai)663 static int amd_sdw_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
664 {
665 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
666 struct sdw_amd_dai_runtime *dai_runtime;
667 int ret;
668
669 dai_runtime = amd_manager->dai_runtime_array[dai->id];
670 if (!dai_runtime)
671 return -EIO;
672
673 ret = sdw_stream_remove_master(&amd_manager->bus, dai_runtime->stream);
674 if (ret < 0)
675 dev_err(dai->dev, "remove manager from stream %s failed: %d\n",
676 dai_runtime->stream->name, ret);
677 return ret;
678 }
679
amd_set_sdw_stream(struct snd_soc_dai * dai,void * stream,int direction)680 static int amd_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
681 {
682 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
683 struct sdw_amd_dai_runtime *dai_runtime;
684
685 dai_runtime = amd_manager->dai_runtime_array[dai->id];
686 if (stream) {
687 /* first paranoia check */
688 if (dai_runtime) {
689 dev_err(dai->dev, "dai_runtime already allocated for dai %s\n", dai->name);
690 return -EINVAL;
691 }
692
693 /* allocate and set dai_runtime info */
694 dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
695 if (!dai_runtime)
696 return -ENOMEM;
697
698 dai_runtime->stream_type = SDW_STREAM_PCM;
699 dai_runtime->bus = &amd_manager->bus;
700 dai_runtime->stream = stream;
701 amd_manager->dai_runtime_array[dai->id] = dai_runtime;
702 } else {
703 /* second paranoia check */
704 if (!dai_runtime) {
705 dev_err(dai->dev, "dai_runtime not allocated for dai %s\n", dai->name);
706 return -EINVAL;
707 }
708
709 /* for NULL stream we release allocated dai_runtime */
710 kfree(dai_runtime);
711 amd_manager->dai_runtime_array[dai->id] = NULL;
712 }
713 return 0;
714 }
715
/* PCM set_stream DAI op; delegates to the common stream attach helper. */
static int amd_pcm_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
	return amd_set_sdw_stream(dai, stream, direction);
}
720
amd_get_sdw_stream(struct snd_soc_dai * dai,int direction)721 static void *amd_get_sdw_stream(struct snd_soc_dai *dai, int direction)
722 {
723 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
724 struct sdw_amd_dai_runtime *dai_runtime;
725
726 dai_runtime = amd_manager->dai_runtime_array[dai->id];
727 if (!dai_runtime)
728 return ERR_PTR(-EINVAL);
729
730 return dai_runtime->stream;
731 }
732
/* DAI ops shared by all of this manager's playback and capture DAIs. */
static const struct snd_soc_dai_ops amd_sdw_dai_ops = {
	.hw_params = amd_sdw_hw_params,
	.hw_free = amd_sdw_hw_free,
	.set_stream = amd_pcm_set_sdw_stream,
	.get_stream = amd_get_sdw_stream,
};
739
/* ASoC component under which the manager's DAIs are registered. */
static const struct snd_soc_component_driver amd_sdw_dai_component = {
	.name = "soundwire",
};
743
/*
 * Allocate and register one DAI per manager data port: the first
 * num_dout_ports DAIs are playback ("SDW%d Pin%d"), the remainder are
 * capture. Also allocates the parallel dai_runtime_array consumed by the
 * DAI ops. All DAIs are fixed at 2 channels, 48 kHz, S16_LE.
 * Returns 0 on success or a negative error code.
 */
static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
{
	struct sdw_amd_dai_runtime **dai_runtime_array;
	struct snd_soc_dai_driver *dais;
	struct snd_soc_pcm_stream *stream;
	struct device *dev;
	int i, num_dais;

	dev = amd_manager->dev;
	num_dais = amd_manager->num_dout_ports + amd_manager->num_din_ports;
	dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	/* one runtime slot per DAI, filled lazily by amd_set_sdw_stream() */
	dai_runtime_array = devm_kcalloc(dev, num_dais,
					 sizeof(struct sdw_amd_dai_runtime *),
					 GFP_KERNEL);
	if (!dai_runtime_array)
		return -ENOMEM;
	amd_manager->dai_runtime_array = dai_runtime_array;
	for (i = 0; i < num_dais; i++) {
		dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW%d Pin%d", amd_manager->instance,
					      i);
		if (!dais[i].name)
			return -ENOMEM;
		/* output ports come first, then input ports */
		if (i < amd_manager->num_dout_ports)
			stream = &dais[i].playback;
		else
			stream = &dais[i].capture;

		stream->channels_min = 2;
		stream->channels_max = 2;
		stream->rates = SNDRV_PCM_RATE_48000;
		stream->formats = SNDRV_PCM_FMTBIT_S16_LE;

		dais[i].ops = &amd_sdw_dai_ops;
		dais[i].id = i;
	}

	return devm_snd_soc_register_component(dev, &amd_sdw_dai_component,
					       dais, num_dais);
}
786
/*
 * Deferred status work: feed the cached peripheral status array into the
 * SoundWire core. While Device0 is attached (enumeration in progress),
 * re-read status via PING and loop until enumeration completes or the
 * retry budget is exhausted.
 */
static void amd_sdw_update_slave_status_work(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
		container_of(work, struct amd_sdw_manager, amd_sdw_work);
	int retry_count = 0;

	/* mask state-change interrupts while enumeration is in flight */
	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
		writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
		writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	}

update_status:
	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
	/*
	 * During the peripheral enumeration sequence, the SoundWire manager interrupts
	 * are masked. Once the device number programming is done for all peripherals,
	 * interrupts will be unmasked. Read the peripheral device status from ping command
	 * and process the response. This sequence will ensure all peripheral devices enumerated
	 * and initialized properly.
	 */
	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
		if (retry_count++ < SDW_MAX_DEVICES) {
			writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
			       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
			writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
			       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
			amd_sdw_read_and_process_ping_status(amd_manager);
			goto update_status;
		} else {
			dev_err_ratelimited(amd_manager->dev,
					    "Device0 detected after %d iterations\n",
					    retry_count);
		}
	}
}
822
/*
 * Translate the hardware state-change status registers into the driver's
 * per-device status[] array consumed by sdw_handle_slave_status().
 */
static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
					struct amd_sdw_manager *amd_manager)
{
	u64 slave_stat;
	u32 val;
	int dev_index;

	/* a lone Device0-attached report starts fresh enumeration: drop stale state */
	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
		memset(amd_manager->status, 0, sizeof(amd_manager->status));
	/* merge the two status registers into one per-device bit field */
	slave_stat = status_change_0to7;
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
	dev_dbg(amd_manager->dev, "status_change_0to7:0x%x status_change_8to11:0x%x\n",
		status_change_0to7, status_change_8to11);
	if (slave_stat) {
		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
			/* only record entries the hardware flags as valid */
			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
				amd_sdw_fill_slave_status(amd_manager, dev_index, val);
			}
		}
	}
}
846
/*
 * Handle a wake event: request runtime resume of the device, then clear
 * the ACP wake enable and the manager's 8TO11 state-change status.
 */
static void amd_sdw_process_wake_event(struct amd_sdw_manager *amd_manager)
{
	pm_request_resume(amd_manager->dev);
	writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
}
853
/*
 * Workqueue-based interrupt bottom half: read both state-change status
 * registers, dispatch wake / peripheral-request / status-change handling,
 * then acknowledge the status registers.
 */
static void amd_sdw_irq_thread(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
			container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
	u32 status_change_8to11;
	u32 status_change_0to7;

	status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
	dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
		amd_manager->instance, status_change_0to7, status_change_8to11);
	/* wake events clear their own status in amd_sdw_process_wake_event() */
	if (status_change_8to11 & AMD_SDW_WAKE_STAT_MASK)
		return amd_sdw_process_wake_event(amd_manager);

	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
		/* peripheral request: refresh device status via a PING command */
		amd_sdw_read_and_process_ping_status(amd_manager);
	} else {
		/* Check for the updated status on peripheral device */
		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
	}
	/* defer sdw_handle_slave_status() processing to the status work */
	if (status_change_8to11 || status_change_0to7)
		schedule_work(&amd_manager->amd_sdw_work);
	/* acknowledge the interrupts that were just handled */
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}
879
/*
 * Deferred probe: perform the lengthy manager hardware bring-up outside
 * amd_sdw_manager_probe(), then enable runtime PM.
 */
static void amd_sdw_probe_work(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
							   probe_work);
	struct sdw_master_prop *prop;
	int ret;

	prop = &amd_manager->bus.prop;
	if (!prop->hw_disabled) {
		amd_enable_sdw_pads(amd_manager);
		ret = amd_init_sdw_manager(amd_manager);
		if (ret)
			/* NOTE(review): runtime PM stays disabled on this error path */
			return;
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return;
		amd_sdw_set_frameshape(amd_manager);
	}
	/* Enable runtime PM */
	pm_runtime_set_autosuspend_delay(amd_manager->dev, AMD_SDW_MASTER_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(amd_manager->dev);
	pm_runtime_mark_last_busy(amd_manager->dev);
	pm_runtime_set_active(amd_manager->dev);
	pm_runtime_enable(amd_manager->dev);
}
906
amd_sdw_manager_probe(struct platform_device * pdev)907 static int amd_sdw_manager_probe(struct platform_device *pdev)
908 {
909 const struct acp_sdw_pdata *pdata = pdev->dev.platform_data;
910 struct resource *res;
911 struct device *dev = &pdev->dev;
912 struct sdw_master_prop *prop;
913 struct sdw_bus_params *params;
914 struct amd_sdw_manager *amd_manager;
915 int ret;
916
917 amd_manager = devm_kzalloc(dev, sizeof(struct amd_sdw_manager), GFP_KERNEL);
918 if (!amd_manager)
919 return -ENOMEM;
920
921 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
922 if (!res)
923 return -ENOMEM;
924
925 amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res));
926 if (!amd_manager->acp_mmio) {
927 dev_err(dev, "mmio not found\n");
928 return -ENOMEM;
929 }
930 amd_manager->instance = pdata->instance;
931 amd_manager->mmio = amd_manager->acp_mmio +
932 (amd_manager->instance * SDW_MANAGER_REG_OFFSET);
933 amd_manager->acp_sdw_lock = pdata->acp_sdw_lock;
934 amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS);
935 amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS);
936 amd_manager->dev = dev;
937 amd_manager->bus.ops = &amd_sdw_ops;
938 amd_manager->bus.port_ops = &amd_sdw_port_ops;
939 amd_manager->bus.compute_params = &amd_sdw_compute_params;
940 amd_manager->bus.clk_stop_timeout = 200;
941 amd_manager->bus.link_id = amd_manager->instance;
942
943 /*
944 * Due to BIOS compatibility, the two links are exposed within
945 * the scope of a single controller. If this changes, the
946 * controller_id will have to be updated with drv_data
947 * information.
948 */
949 amd_manager->bus.controller_id = 0;
950
951 switch (amd_manager->instance) {
952 case ACP_SDW0:
953 amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
954 amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS;
955 break;
956 case ACP_SDW1:
957 amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS;
958 amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS;
959 break;
960 default:
961 return -EINVAL;
962 }
963
964 amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
965 params = &amd_manager->bus.params;
966 params->max_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
967 params->curr_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
968 params->col = AMD_SDW_DEFAULT_COLUMNS;
969 params->row = AMD_SDW_DEFAULT_ROWS;
970 prop = &amd_manager->bus.prop;
971 prop->clk_freq = &amd_sdw_freq_tbl[0];
972 prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;
973
974 ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
975 if (ret) {
976 dev_err(dev, "Failed to register SoundWire manager(%d)\n", ret);
977 return ret;
978 }
979 ret = amd_sdw_register_dais(amd_manager);
980 if (ret) {
981 dev_err(dev, "CPU DAI registration failed\n");
982 sdw_bus_master_delete(&amd_manager->bus);
983 return ret;
984 }
985 dev_set_drvdata(dev, amd_manager);
986 INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
987 INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
988 INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
989 /*
990 * Instead of having lengthy probe sequence, use deferred probe.
991 */
992 schedule_work(&amd_manager->probe_work);
993 return 0;
994 }
995
/*
 * Tear down a manager instance: stop runtime PM, make sure the deferred
 * bring-up work has finished, then disable interrupts and the manager.
 */
static void amd_sdw_manager_remove(struct platform_device *pdev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(&pdev->dev);
	int ret;

	/* Disable runtime PM first so no resume callback races the teardown */
	pm_runtime_disable(&pdev->dev);
	/* probe_work may still be running the deferred hardware bring-up */
	cancel_work_sync(&amd_manager->probe_work);
	amd_disable_sdw_interrupts(amd_manager);
	sdw_bus_master_delete(&amd_manager->bus);
	ret = amd_disable_sdw_manager(amd_manager);
	if (ret)
		dev_err(&pdev->dev, "Failed to disable device (%pe)\n", ERR_PTR(ret));
}
1009
/*
 * Put the bus into clock-stop mode and optionally arm the wake source.
 *
 * Errors are logged but the function always returns 0 — presumably so a
 * failed clock stop never blocks the suspend sequence (NOTE(review):
 * confirm this is the intended policy).
 */
static int amd_sdw_clock_stop(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	/* Ask the sdw core to prepare peripherals; -ENODATA means nothing to do */
	ret = sdw_bus_prep_clk_stop(&amd_manager->bus);
	if (ret < 0 && ret != -ENODATA) {
		dev_err(amd_manager->dev, "prepare clock stop failed %d", ret);
		return 0;
	}
	ret = sdw_bus_clk_stop(&amd_manager->bus);
	if (ret < 0 && ret != -ENODATA) {
		dev_err(amd_manager->dev, "bus clock stop failed %d", ret);
		return 0;
	}

	/* Wait for the hardware to report clock-stop completion */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
				 (val & AMD_SDW_CLK_STOP_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x clock stop failed\n", amd_manager->instance);
		return 0;
	}

	amd_manager->clk_stopped = true;
	/* Arm the wake source for this instance if wake is requested */
	if (amd_manager->wake_en_mask)
		writel(0x01, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));

	dev_dbg(amd_manager->dev, "SDW%x clock stop successful\n", amd_manager->instance);
	return 0;
}
1040
/*
 * Bring the bus out of clock-stop mode.
 *
 * Always returns 0; an exit failure is only reported via dev_err
 * (NOTE(review): confirm callers do not need the error propagated).
 */
static int amd_sdw_clock_stop_exit(struct amd_sdw_manager *amd_manager)
{
	int ret;
	u32 val;

	if (amd_manager->clk_stopped) {
		/* Request clock resume and poll for hardware acknowledgement */
		val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		val |= AMD_SDW_CLK_RESUME_REQ;
		writel(val, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
					 (val & AMD_SDW_CLK_RESUME_DONE), ACP_DELAY_US,
					 AMD_SDW_TIMEOUT);
		/*
		 * Success is judged from the DONE bit in the last value read,
		 * not from the poll return code.
		 */
		if (val & AMD_SDW_CLK_RESUME_DONE) {
			writel(0, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
			ret = sdw_bus_exit_clk_stop(&amd_manager->bus);
			if (ret < 0)
				dev_err(amd_manager->dev, "bus failed to exit clock stop %d\n",
					ret);
			amd_manager->clk_stopped = false;
		}
	}
	/* clk_stopped still set here means the resume handshake never completed */
	if (amd_manager->clk_stopped) {
		dev_err(amd_manager->dev, "SDW%x clock stop exit failed\n", amd_manager->instance);
		return 0;
	}
	dev_dbg(amd_manager->dev, "SDW%x clock stop exit successful\n", amd_manager->instance);
	return 0;
}
1069
amd_resume_child_device(struct device * dev,void * data)1070 static int amd_resume_child_device(struct device *dev, void *data)
1071 {
1072 struct sdw_slave *slave = dev_to_sdw_dev(dev);
1073 int ret;
1074
1075 if (!slave->probed) {
1076 dev_dbg(dev, "skipping device, no probed driver\n");
1077 return 0;
1078 }
1079 if (!slave->dev_num_sticky) {
1080 dev_dbg(dev, "skipping device, never detected on bus\n");
1081 return 0;
1082 }
1083 ret = pm_request_resume(dev);
1084 if (ret < 0) {
1085 dev_err(dev, "pm_request_resume failed: %d\n", ret);
1086 return ret;
1087 }
1088 return 0;
1089 }
1090
/*
 * System suspend .prepare callback: runtime-resume the manager (clock-stop
 * mode only) and all attached peripherals before system suspend proceeds.
 * Always returns 0 so suspend is never vetoed from here.
 */
static int __maybe_unused amd_pm_prepare(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}
	/*
	 * When multiple peripheral devices connected over the same link, if SoundWire manager
	 * device is not in runtime suspend state, observed that device alerts are missing
	 * without pm_prepare on AMD platforms in clockstop mode0.
	 */
	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		ret = pm_request_resume(dev);
		if (ret < 0) {
			/* Log only; do not block the system suspend sequence */
			dev_err(bus->dev, "pm_request_resume failed: %d\n", ret);
			return 0;
		}
	}
	/*
	 * To force peripheral devices to system level suspend state, resume the devices
	 * from runtime suspend state first. Without that unable to dispatch the alert
	 * status to peripheral driver during system level resume as they are in runtime
	 * suspend state.
	 */
	ret = device_for_each_child(bus->dev, NULL, amd_resume_child_device);
	if (ret < 0)
		dev_err(dev, "amd_resume_child_device failed: %d\n", ret);
	return 0;
}
1124
amd_suspend(struct device * dev)1125 static int __maybe_unused amd_suspend(struct device *dev)
1126 {
1127 struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
1128 struct sdw_bus *bus = &amd_manager->bus;
1129 int ret;
1130
1131 if (bus->prop.hw_disabled) {
1132 dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
1133 bus->link_id);
1134 return 0;
1135 }
1136
1137 if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
1138 amd_sdw_wake_enable(amd_manager, false);
1139 return amd_sdw_clock_stop(amd_manager);
1140 } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
1141 /*
1142 * As per hardware programming sequence on AMD platforms,
1143 * clock stop should be invoked first before powering-off
1144 */
1145 ret = amd_sdw_clock_stop(amd_manager);
1146 if (ret)
1147 return ret;
1148 return amd_deinit_sdw_manager(amd_manager);
1149 }
1150 return 0;
1151 }
1152
amd_suspend_runtime(struct device * dev)1153 static int __maybe_unused amd_suspend_runtime(struct device *dev)
1154 {
1155 struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
1156 struct sdw_bus *bus = &amd_manager->bus;
1157 int ret;
1158
1159 if (bus->prop.hw_disabled) {
1160 dev_dbg(bus->dev, "SoundWire manager %d is disabled,\n",
1161 bus->link_id);
1162 return 0;
1163 }
1164 if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
1165 amd_sdw_wake_enable(amd_manager, true);
1166 return amd_sdw_clock_stop(amd_manager);
1167 } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
1168 ret = amd_sdw_clock_stop(amd_manager);
1169 if (ret)
1170 return ret;
1171 return amd_deinit_sdw_manager(amd_manager);
1172 }
1173 return 0;
1174 }
1175
/*
 * Resume handler (used for both runtime and system resume): exit
 * clock-stop mode, or — in power-off mode — finish any pending clock
 * resume and fully re-initialize the manager hardware.
 */
static int __maybe_unused amd_resume_runtime(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;
	u32 val;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		return amd_sdw_clock_stop_exit(amd_manager);
	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
		/*
		 * A non-zero resume-control register means a clock resume is
		 * still pending; complete the handshake before re-init.
		 */
		val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		if (val) {
			val |= AMD_SDW_CLK_RESUME_REQ;
			writel(val, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
			ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
						 (val & AMD_SDW_CLK_RESUME_DONE), ACP_DELAY_US,
						 AMD_SDW_TIMEOUT);
			/* Success is judged from the DONE bit, not the poll rc */
			if (val & AMD_SDW_CLK_RESUME_DONE) {
				writel(0, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
				amd_manager->clk_stopped = false;
			}
		}
		/* Hardware was powered off: peripherals must re-enumerate */
		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
		amd_init_sdw_manager(amd_manager);
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return ret;
		amd_sdw_set_frameshape(amd_manager);
	}
	return 0;
}
1214
/*
 * PM callbacks: .prepare resumes children before system suspend;
 * amd_resume_runtime serves as both the system and runtime resume hook.
 */
static const struct dev_pm_ops amd_pm = {
	.prepare = amd_pm_prepare,
	SET_SYSTEM_SLEEP_PM_OPS(amd_suspend, amd_resume_runtime)
	SET_RUNTIME_PM_OPS(amd_suspend_runtime, amd_resume_runtime, NULL)
};
1220
1221 static struct platform_driver amd_sdw_driver = {
1222 .probe = &amd_sdw_manager_probe,
1223 .remove_new = &amd_sdw_manager_remove,
1224 .driver = {
1225 .name = "amd_sdw_manager",
1226 .pm = &amd_pm,
1227 }
1228 };
1229 module_platform_driver(amd_sdw_driver);
1230
1231 MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
1232 MODULE_DESCRIPTION("AMD SoundWire driver");
1233 MODULE_LICENSE("GPL");
1234 MODULE_ALIAS("platform:" DRV_NAME);
1235