// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "qcom_common.h"
#include "remoteproc_internal.h"
#include "qcom_pil_info.h"
#include "qcom_wcnss.h"

#define WCNSS_CRASH_REASON_SMEM		422
#define WCNSS_FIRMWARE_NAME		"wcnss.mdt"
#define WCNSS_PAS_ID			6
#define WCNSS_SSCTL_ID			0x13

#define WCNSS_SPARE_NVBIN_DLND		BIT(25)

#define WCNSS_PMU_IRIS_XO_CFG		BIT(3)
#define WCNSS_PMU_IRIS_XO_EN		BIT(4)
#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP	BIT(5)
#define WCNSS_PMU_IRIS_XO_CFG_STS	BIT(6) /* 1: in progress, 0: done */

#define WCNSS_PMU_IRIS_RESET		BIT(7)
#define WCNSS_PMU_IRIS_RESET_STS	BIT(8) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_XO_READ		BIT(9)
#define WCNSS_PMU_IRIS_XO_READ_STS	BIT(10)

#define WCNSS_PMU_XO_MODE_MASK		GENMASK(2, 1)
#define WCNSS_PMU_XO_MODE_19p2		0
#define WCNSS_PMU_XO_MODE_48		3

#define WCNSS_MAX_PDS			2

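/*
 * Per-compatible match data: register offsets into the PMU MMIO block plus
 * the power domains and regulators needed by this WCNSS variant.
 */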
struct wcnss_data {
	size_t pmu_offset;
	size_t spare_offset;

	const char *pd_names[WCNSS_MAX_PDS];
	const struct wcnss_vreg_info *vregs;
	size_t num_vregs, num_pd_vregs;
};

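/*
 * Runtime state for one WCNSS instance: MMIO pointers into the PMU block,
 * the interrupts and SMEM state used to talk to the remote side, the
 * attached power domains and regulators, and the reserved memory region
 * that the firmware is loaded into.
 */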
struct qcom_wcnss {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *pmu_cfg;
	void __iomem *spare_out;

	bool use_48mhz_xo;

	int wdog_irq;
	int fatal_irq;
	int ready_irq;
	int handover_irq;
	int stop_ack_irq;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct mutex iris_lock;
	struct qcom_iris *iris;

	struct device *pds[WCNSS_MAX_PDS];
	size_t num_pds;
	struct regulator_bulk_data *vregs;
	size_t num_vregs;

	struct completion start_done;
	struct completion stop_done;

	phys_addr_t mem_phys;
	phys_addr_t mem_reloc;
	void *mem_region;
	size_t mem_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_sysmon *sysmon;
};

static const struct wcnss_data riva_data = {
	.pmu_offset = 0x28,
	.spare_offset = 0xb4,

	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1050000, 1150000, 0 },
		{ "vddcx", 1050000, 1150000, 0 },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_vregs = 3,
};

static const struct wcnss_data pronto_v1_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 950000, 1150000, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static const struct wcnss_data pronto_v2_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1287500, 1287500, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static const struct wcnss_data pronto_v3_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_vregs = 1,
};

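/*
 * rproc .load handler: parse the split mdt/bNN firmware, copy its segments
 * into the reserved memory region and record where the image was loaded via
 * qcom_pil_info_store().
 */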
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qcom_wcnss *wcnss = rproc->priv;
	int ret;

	ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
			    wcnss->mem_region, wcnss->mem_phys,
			    wcnss->mem_size, &wcnss->mem_reloc);
	if (ret)
		return ret;

	qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);

	return 0;
}

static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Indicate NV download capability */
	val = readl(wcnss->spare_out);
	val |= WCNSS_SPARE_NVBIN_DLND;
	writel(val, wcnss->spare_out);
}

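/*
 * Program the WCNSS PMU register to bring the IRIS RF module up: enable the
 * XO and the top-level bus mux, select the XO mode (19.2 MHz or 48 MHz, as
 * probed from the IRIS node), pulse the IRIS reset and XO configuration bits
 * while polling their status bits, then give the XO some time to settle.
 */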
static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Clear PMU cfg register */
	writel(0, wcnss->pmu_cfg);

	val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
	writel(val, wcnss->pmu_cfg);

	/* Clear XO_MODE */
	val &= ~WCNSS_PMU_XO_MODE_MASK;
	if (wcnss->use_48mhz_xo)
		val |= WCNSS_PMU_XO_MODE_48 << 1;
	else
		val |= WCNSS_PMU_XO_MODE_19p2 << 1;
	writel(val, wcnss->pmu_cfg);

	/* Reset IRIS */
	val |= WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Wait for PMU.iris_reg_reset_sts */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
		cpu_relax();

	/* Clear IRIS reset */
	val &= ~WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Start IRIS XO configuration */
	val |= WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Wait for XO configuration to finish */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
		cpu_relax();

	/* Stop IRIS XO configuration */
	val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
	val &= ~WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Add some delay for XO to settle */
	msleep(20);
}

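/*
 * rproc .start handler. Power-up order matters: vote the power domains to
 * their maximum performance state, enable the regulators and the IRIS
 * module, configure the PMU, and only then ask TrustZone to authenticate
 * the image and release the processor from reset. The IRIS, regulator and
 * power-domain votes are dropped again at the end of the function; see the
 * comment in wcnss_handover_interrupt() below for why.
 */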
static int wcnss_start(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = rproc->priv;
	int ret, i;

	mutex_lock(&wcnss->iris_lock);
	if (!wcnss->iris) {
		dev_err(wcnss->dev, "no iris registered\n");
		ret = -EINVAL;
		goto release_iris_lock;
	}

	for (i = 0; i < wcnss->num_pds; i++) {
		dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
		ret = pm_runtime_get_sync(wcnss->pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(wcnss->pds[i]);
			goto disable_pds;
		}
	}

	ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
	if (ret)
		goto disable_pds;

	ret = qcom_iris_enable(wcnss->iris);
	if (ret)
		goto disable_regulators;

	wcnss_indicate_nv_download(wcnss);
	wcnss_configure_iris(wcnss);

	ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
	if (ret) {
		dev_err(wcnss->dev,
			"failed to authenticate image and release reset\n");
		goto disable_iris;
	}

	ret = wait_for_completion_timeout(&wcnss->start_done,
					  msecs_to_jiffies(5000));
	if (wcnss->ready_irq > 0 && ret == 0) {
		/* We have a ready_irq, but it didn't fire in time. */
		dev_err(wcnss->dev, "start timed out\n");
		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
		ret = -ETIMEDOUT;
		goto disable_iris;
	}

	ret = 0;

disable_iris:
	qcom_iris_disable(wcnss->iris);
disable_regulators:
	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
disable_pds:
	for (i--; i >= 0; i--) {
		pm_runtime_put(wcnss->pds[i]);
		dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
	}
release_iris_lock:
	mutex_unlock(&wcnss->iris_lock);

	return ret;
}

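/*
 * rproc .stop handler: assert the "stop" bit towards the firmware over the
 * SMEM state (when a stop-ack interrupt is wired up), wait up to five
 * seconds for the acknowledgement and finally ask TrustZone to shut the
 * subsystem down.
 */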
static int wcnss_stop(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = rproc->priv;
	int ret;

	if (wcnss->state) {
		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    BIT(wcnss->stop_bit));

		ret = wait_for_completion_timeout(&wcnss->stop_done,
						  msecs_to_jiffies(5000));
		if (ret == 0)
			dev_err(wcnss->dev, "timed out on wait\n");

		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    0);
	}

	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
	if (ret)
		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);

	return ret;
}

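/*
 * Translate a device address used by the remote processor into a kernel
 * virtual address within the reserved memory region, or return NULL if the
 * requested range falls outside of it.
 */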
static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct qcom_wcnss *wcnss = rproc->priv;
	int offset;

	offset = da - wcnss->mem_reloc;
	if (offset < 0 || offset + len > wcnss->mem_size)
		return NULL;

	return wcnss->mem_region + offset;
}

static const struct rproc_ops wcnss_ops = {
	.start = wcnss_start,
	.stop = wcnss_stop,
	.da_to_va = wcnss_da_to_va,
	.parse_fw = qcom_register_dump_segments,
	.load = wcnss_load,
};

static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(wcnss->dev, "fatal error received: %s\n", msg);

	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->start_done);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
{
	/*
	 * XXX: At this point we're supposed to release the resources that we
	 * have been holding on behalf of the WCNSS. Unfortunately this
	 * interrupt comes way before the other side seems to be done.
	 *
	 * So we're currently relying on the ready interrupt firing later than
	 * this one and we just disable the resources at the end of
	 * wcnss_start().
	 */

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->stop_done);

	return IRQ_HANDLED;
}

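/*
 * Attach the named power domains (at most WCNSS_MAX_PDS). On failure any
 * domains that were already attached are detached again. An empty pd_names
 * list simply leaves num_pds at zero, which makes the driver vote through
 * the regulator framework instead.
 */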
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
			  const char * const pd_names[WCNSS_MAX_PDS])
{
	int i, ret;

	for (i = 0; i < WCNSS_MAX_PDS; i++) {
		if (!pd_names[i])
			break;

		wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
		if (IS_ERR_OR_NULL(wcnss->pds[i])) {
			ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
			for (i--; i >= 0; i--)
				dev_pm_domain_detach(wcnss->pds[i], false);
			return ret;
		}
	}
	wcnss->num_pds = i;

	return 0;
}

static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
	int i;

	for (i = 0; i < wcnss->num_pds; i++)
		dev_pm_domain_detach(wcnss->pds[i], false);
}

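/*
 * Request the regulators described in the match data and apply their voltage
 * and load requirements. The first num_pd_vregs entries describe supplies
 * that are normally handled as power domains; they are only requested here
 * when no power domains could be attached (i.e. on old device trees).
 */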
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
				 const struct wcnss_vreg_info *info,
				 int num_vregs, int num_pd_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	/*
	 * If attaching the power domains succeeded we can skip requesting
	 * the regulators for the power domains. For old device trees we need
	 * to reserve extra space to manage them through the regulator
	 * interface.
	 */
	if (wcnss->num_pds)
		info += num_pd_vregs;
	else
		num_vregs += num_pd_vregs;

	bulk = devm_kcalloc(wcnss->dev,
			    num_vregs, sizeof(struct regulator_bulk_data),
			    GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = info[i].name;

	ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
	if (ret)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		if (info[i].max_voltage)
			regulator_set_voltage(bulk[i].consumer,
					      info[i].min_voltage,
					      info[i].max_voltage);

		if (info[i].load_uA)
			regulator_set_load(bulk[i].consumer, info[i].load_uA);
	}

	wcnss->vregs = bulk;
	wcnss->num_vregs = num_vregs;

	return 0;
}

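/*
 * Look up a named interrupt and install a threaded handler for it. Returns
 * the IRQ number on success, 0 when an optional interrupt is absent, or a
 * negative errno. Callers use the stored number to tell whether an optional
 * interrupt (ready, handover, stop-ack) is actually wired up.
 */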
static int wcnss_request_irq(struct qcom_wcnss *wcnss,
			     struct platform_device *pdev,
			     const char *name,
			     bool optional,
			     irq_handler_t thread_fn)
{
	int ret;
	int irq_number;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0 && optional) {
		dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
		return 0;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	irq_number = ret;

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"wcnss", wcnss);
	if (ret) {
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);
		return ret;
	}

	/* Return the IRQ number if the IRQ was successfully acquired */
	return irq_number;
}

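/*
 * Resolve the "memory-region" phandle to the reserved memory carved out for
 * the WCNSS firmware and map it write-combined so wcnss_load() can copy the
 * image into it.
 */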
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
	struct reserved_mem *rmem = NULL;
	struct device_node *node;

	node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
	if (node)
		rmem = of_reserved_mem_lookup(node);
	of_node_put(node);

	if (!rmem) {
		dev_err(wcnss->dev, "unable to resolve memory-region\n");
		return -EINVAL;
	}

	wcnss->mem_phys = wcnss->mem_reloc = rmem->base;
	wcnss->mem_size = rmem->size;
	wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
	if (!wcnss->mem_region) {
		dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
			&rmem->base, wcnss->mem_size);
		return -EBUSY;
	}

	return 0;
}

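/*
 * Probe: bail out early if SCM/PAS support is missing, allocate the rproc,
 * map the PMU registers and reserved memory, attach power domains or fall
 * back to regulators, wire up the interrupts and the "stop" SMEM state,
 * register the SMD and sysmon subdevices, probe the IRIS child and finally
 * register the remoteproc.
 */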
static int wcnss_probe(struct platform_device *pdev)
{
	const char *fw_name = WCNSS_FIRMWARE_NAME;
	const struct wcnss_data *data;
	struct qcom_wcnss *wcnss;
	struct rproc *rproc;
	void __iomem *mmio;
	int ret;

	data = of_device_get_match_data(&pdev->dev);

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
		dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
		return -ENXIO;
	}

	ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
				      &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
			    fw_name, sizeof(*wcnss));
	if (!rproc) {
		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
		return -ENOMEM;
	}
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	wcnss = rproc->priv;
	wcnss->dev = &pdev->dev;
	wcnss->rproc = rproc;
	platform_set_drvdata(pdev, wcnss);

	init_completion(&wcnss->start_done);
	init_completion(&wcnss->stop_done);

	mutex_init(&wcnss->iris_lock);

	mmio = devm_platform_ioremap_resource_byname(pdev, "pmu");
	if (IS_ERR(mmio)) {
		ret = PTR_ERR(mmio);
		goto free_rproc;
	}

	ret = wcnss_alloc_memory_region(wcnss);
	if (ret)
		goto free_rproc;

	wcnss->pmu_cfg = mmio + data->pmu_offset;
	wcnss->spare_out = mmio + data->spare_offset;

	/*
	 * We might need to fall back to regulators instead of power domains
	 * for old device trees. Don't report an error in that case.
	 */
	ret = wcnss_init_pds(wcnss, data->pd_names);
	if (ret && (ret != -ENODATA || !data->num_pd_vregs))
		goto free_rproc;

	ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
				    data->num_pd_vregs);
	if (ret)
		goto detach_pds;

	ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->wdog_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->fatal_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->ready_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->handover_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->stop_ack_irq = ret;

	if (wcnss->stop_ack_irq) {
		wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
							&wcnss->stop_bit);
		if (IS_ERR(wcnss->state)) {
			ret = PTR_ERR(wcnss->state);
			goto detach_pds;
		}
	}

	qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
	wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
	if (IS_ERR(wcnss->sysmon)) {
		ret = PTR_ERR(wcnss->sysmon);
		goto detach_pds;
	}

	wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
	if (IS_ERR(wcnss->iris)) {
		ret = PTR_ERR(wcnss->iris);
		goto detach_pds;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_iris;

	return 0;

remove_iris:
	qcom_iris_remove(wcnss->iris);
detach_pds:
	wcnss_release_pds(wcnss);
free_rproc:
	rproc_free(rproc);

	return ret;
}

static void wcnss_remove(struct platform_device *pdev)
{
	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);

	qcom_iris_remove(wcnss->iris);

	rproc_del(wcnss->rproc);

	qcom_remove_sysmon_subdev(wcnss->sysmon);
	qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
	wcnss_release_pds(wcnss);
	rproc_free(wcnss->rproc);
}

static const struct of_device_id wcnss_of_match[] = {
	{ .compatible = "qcom,riva-pil", &riva_data },
	{ .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
	{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
	{ .compatible = "qcom,pronto-v3-pil", &pronto_v3_data },
	{ },
};
MODULE_DEVICE_TABLE(of, wcnss_of_match);

static struct platform_driver wcnss_driver = {
	.probe = wcnss_probe,
	.remove_new = wcnss_remove,
	.driver = {
		.name = "qcom-wcnss-pil",
		.of_match_table = wcnss_of_match,
	},
};

module_platform_driver(wcnss_driver);

MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");