// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_core.c - BRCM BDC USB3.0 device controller core operations
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/moduleparam.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/clk.h>

#include "bdc.h"
#include "bdc_dbg.h"

/* Poll till controller status is not OIP */
static int poll_oip(struct bdc *bdc, u32 usec)
{
	u32 status;
	int ret;

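	/*
	 * BDCSC is sampled every 10 us until the command status field leaves
	 * the "operation in progress" state or @usec microseconds elapse.
	 */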
	ret = readl_poll_timeout(bdc->regs + BDC_BDCSC, status,
				 (BDC_CSTS(status) != BDC_OIP), 10, usec);
	if (ret)
		dev_err(bdc->dev, "operation timed out BDCSC: 0x%08x\n", status);
	else
		dev_dbg(bdc->dev, "%s complete status=%d", __func__, BDC_CSTS(status));

	return ret;
}

/* Stop the BDC controller */
int bdc_stop(struct bdc *bdc)
{
	int ret;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n\n", __func__);
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* Check if BDC is already halted */
	if (BDC_CSTS(temp) == BDC_HLT) {
		dev_vdbg(bdc->dev, "BDC already halted\n");
		return 0;
	}
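	/* Clear the operation field, select the STOP op and set BDC_COS to issue it */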
	temp &= ~BDC_COP_MASK;
	temp |= BDC_COS|BDC_COP_STP;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret)
		dev_err(bdc->dev, "bdc stop operation failed");

	return ret;
}

/* Issue a reset to BDC controller */
int bdc_reset(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	/* First halt the controller */
	ret = bdc_stop(bdc);
	if (ret)
		return ret;

	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	temp &= ~BDC_COP_MASK;
	temp |= BDC_COS|BDC_COP_RST;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);
	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret)
		dev_err(bdc->dev, "bdc reset operation failed");

	return ret;
}

/* Run the BDC controller */
int bdc_run(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* if BDC is already in running state then do not do anything */
	if (BDC_CSTS(temp) == BDC_NOR) {
		dev_warn(bdc->dev, "bdc is already in running state\n");
		return 0;
	}
	temp &= ~BDC_COP_MASK;
	temp |= BDC_COP_RUN;
	temp |= BDC_COS;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);
	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret) {
		dev_err(bdc->dev, "bdc run operation failed:%d", ret);
		return ret;
	}
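	/* The RUN op completed; confirm the controller actually reports normal (running) state */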
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	if (BDC_CSTS(temp) != BDC_NOR) {
		dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
			BDC_CSTS(temp));
		return -ESHUTDOWN;
	}

	return 0;
}

/*
 * Present the termination to the host, typically called from upstream port
 * event with Vbus present = 1
 */
void bdc_softconn(struct bdc *bdc)
{
	u32 uspc;

	uspc = bdc_readl(bdc->regs, BDC_USPC);
	uspc &= ~BDC_PST_MASK;
	uspc |= BDC_LINK_STATE_RX_DET;
	uspc |= BDC_SWS;
	dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
	bdc_writel(bdc->regs, BDC_USPC, uspc);
}

/* Remove the termination */
void bdc_softdisconn(struct bdc *bdc)
{
	u32 uspc;

	uspc = bdc_readl(bdc->regs, BDC_USPC);
	uspc |= BDC_SDC;
	uspc &= ~BDC_SCN;
	dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
	bdc_writel(bdc->regs, BDC_USPC, uspc);
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_setup(struct bdc *bdc)
{
	int sp_buff_size;
	u32 low32;
	u32 upp32;

	sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
	dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
	if (!sp_buff_size) {
		dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
		return 0;
	}
	/* Refer to BDC spec, Table 4 for description of SPB */
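	/* SPB is an exponent: the buffer size works out to 2^(SPB + 5) bytes */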
	sp_buff_size = 1 << (sp_buff_size + 5);
	dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
	bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
						  &bdc->scratchpad.sp_dma,
						  GFP_KERNEL);

	if (!bdc->scratchpad.buff)
		goto fail;

	bdc->sp_buff_size = sp_buff_size;
	bdc->scratchpad.size = sp_buff_size;
	low32 = lower_32_bits(bdc->scratchpad.sp_dma);
	upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);
	bdc_writel(bdc->regs, BDC_SPBBAL, low32);
	bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
	return 0;

fail:
	bdc->scratchpad.buff = NULL;

	return -ENOMEM;
}

/* Allocate the status report ring */
static int setup_srr(struct bdc *bdc, int interrupter)
{
	dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
	bdc->srr.dqp_index = 0;
	/* allocate the status report descriptors */
	bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
					     NUM_SR_ENTRIES * sizeof(struct bdc_bd),
					     &bdc->srr.dma_addr, GFP_KERNEL);
	if (!bdc->srr.sr_bds)
		return -ENOMEM;

	return 0;
}

/* Initialize the HW regs and internal data structures */
static void bdc_mem_init(struct bdc *bdc, bool reinit)
{
	u8 size = 0;
	u32 usb2_pm;
	u32 low32;
	u32 upp32;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc->ep0_state = WAIT_FOR_SETUP;
	bdc->dev_addr = 0;
	bdc->srr.eqp_index = 0;
	bdc->srr.dqp_index = 0;
	bdc->zlp_needed = false;
	bdc->delayed_status = false;

	bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
	/* Init the SRR */
	temp = BDC_SRR_RWS | BDC_SRR_RST;
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
	dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
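	/* Fold the ring size (derived from NUM_SR_ENTRIES) into the low bits of the SRR base address */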
	temp = lower_32_bits(bdc->srr.dma_addr);
	size = fls(NUM_SR_ENTRIES) - 2;
	temp |= size;
	dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
		temp, NUM_SR_ENTRIES, size);

	low32 = lower_32_bits(temp);
	upp32 = upper_32_bits(bdc->srr.dma_addr);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);

	/* Write the dma addresses into regs */
	bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
	bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);

	temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
	temp |= BDC_SRR_IE;
	temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);

	/* Set the Interrupt Coalescence ~500 usec */
	temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
	temp &= ~0xffff;
	temp |= INT_CLS;
	bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	/* Enable hardware LPM */
	usb2_pm |= BDC_HLE;
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	/* readback for debug */
	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);

	/* Disable any unwanted SRs on the SRR */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* We don't want Microframe counter wrap SR */
	temp |= BDC_MASK_MCW;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	/*
	 * In some error cases the driver has to reset the entire BDC
	 * controller; in that case reinit is passed as true
	 */
	if (reinit) {
		int i;
		/* Enable interrupts */
		temp = bdc_readl(bdc->regs, BDC_BDCSC);
		temp |= BDC_GIE;
		bdc_writel(bdc->regs, BDC_BDCSC, temp);
		/* Init scratchpad to 0 */
		memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
		/* Initialize SRR to 0 */
		memset(bdc->srr.sr_bds, 0,
		       NUM_SR_ENTRIES * sizeof(struct bdc_bd));
		/*
		 * clear ep flags to avoid post disconnect stops/deconfigs but
		 * not during S2 exit
		 */
		if (!bdc->gadget.speed)
			for (i = 1; i < bdc->num_eps; ++i)
				bdc->bdc_ep_array[i]->flags = 0;
	} else {
		/* One time initialization only */
		/* Enable status report function pointers */
		bdc->sr_handler[0] = bdc_sr_xsf;
		bdc->sr_handler[1] = bdc_sr_uspc;

		/* EP0 status report function pointers */
		bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
		bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
		bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
	}
}

/* Free the dynamic memory */
static void bdc_mem_free(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	/* Free SRR */
	if (bdc->srr.sr_bds)
		dma_free_coherent(bdc->dev,
				  NUM_SR_ENTRIES * sizeof(struct bdc_bd),
				  bdc->srr.sr_bds, bdc->srr.dma_addr);

	/* Free scratchpad */
	if (bdc->scratchpad.buff)
		dma_free_coherent(bdc->dev, bdc->sp_buff_size,
				  bdc->scratchpad.buff, bdc->scratchpad.sp_dma);

	/* Destroy the dma pools */
	dma_pool_destroy(bdc->bd_table_pool);

	/* Free the bdc_ep array */
	kfree(bdc->bdc_ep_array);

	bdc->srr.sr_bds = NULL;
	bdc->scratchpad.buff = NULL;
	bdc->bd_table_pool = NULL;
	bdc->bdc_ep_array = NULL;
}

/*
 * bdc_reinit resets the controller and reinitializes the registers; it is
 * called from disconnect/bus reset scenarios to ensure proper HW cleanup
 */
int bdc_reinit(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ret = bdc_stop(bdc);
	if (ret)
		goto out;

	ret = bdc_reset(bdc);
	if (ret)
		goto out;

	/* the reinit flag is true */
	bdc_mem_init(bdc, true);
	ret = bdc_run(bdc);
out:
	bdc->reinit = false;

	return ret;
}

/* Allocate all the dynamic memory */
static int bdc_mem_alloc(struct bdc *bdc)
{
	u32 page_size;
	unsigned int num_ieps, num_oeps;

	dev_dbg(bdc->dev,
		"%s() NUM_BDS_PER_TABLE:%d\n", __func__,
		NUM_BDS_PER_TABLE);
	page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
	/* page size is 2^pgs KB */
	page_size = 1 << page_size;
	/* KB */
	page_size <<= 10;
	dev_dbg(bdc->dev, "page_size=%d\n", page_size);

	/* Create a pool of bd tables */
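	/*
	 * Each table holds NUM_BDS_PER_TABLE 16-byte BDs; pool allocations are
	 * 16-byte aligned and must not cross a controller page boundary.
	 */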
	bdc->bd_table_pool =
	    dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
			    16, page_size);

	if (!bdc->bd_table_pool)
		goto fail;

	if (scratchpad_setup(bdc))
		goto fail;

	/* read from regs */
	num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
	num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
	/* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
	bdc->num_eps = num_ieps + num_oeps + 2;
	dev_dbg(bdc->dev,
		"ieps:%d eops:%d num_eps:%d\n",
		num_ieps, num_oeps, bdc->num_eps);
	/* allocate array of ep pointers */
	bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
				    GFP_KERNEL);
	if (!bdc->bdc_ep_array)
		goto fail;

	dev_dbg(bdc->dev, "Allocating sr report0\n");
	if (setup_srr(bdc, 0))
		goto fail;

	return 0;
fail:
	dev_warn(bdc->dev, "Couldn't initialize memory\n");
	bdc_mem_free(bdc);

	return -ENOMEM;
}

/* opposite to bdc_hw_init */
static void bdc_hw_exit(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc_mem_free(bdc);
}

/* Initialize the bdc HW and memory */
static int bdc_hw_init(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	ret = bdc_reset(bdc);
	if (ret) {
		dev_err(bdc->dev, "err resetting bdc, abort bdc init:%d\n", ret);
		return ret;
	}
	ret = bdc_mem_alloc(bdc);
	if (ret) {
		dev_err(bdc->dev, "Mem alloc failed, aborting\n");
		return -ENOMEM;
	}
	bdc_mem_init(bdc, 0);
	bdc_dbg_regs(bdc);
	dev_dbg(bdc->dev, "HW Init done\n");

	return 0;
}

static int bdc_phy_init(struct bdc *bdc)
{
	int phy_num;
	int ret;

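	/* Init and power on each PHY; on failure, unwind the ones already brought up */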
	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		ret = phy_init(bdc->phys[phy_num]);
		if (ret)
			goto err_exit_phy;
		ret = phy_power_on(bdc->phys[phy_num]);
		if (ret) {
			phy_exit(bdc->phys[phy_num]);
			goto err_exit_phy;
		}
	}

	return 0;

err_exit_phy:
	while (--phy_num >= 0) {
		phy_power_off(bdc->phys[phy_num]);
		phy_exit(bdc->phys[phy_num]);
	}

	return ret;
}

static void bdc_phy_exit(struct bdc *bdc)
{
	int phy_num;

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		phy_power_off(bdc->phys[phy_num]);
		phy_exit(bdc->phys[phy_num]);
	}
}

static int bdc_probe(struct platform_device *pdev)
{
	struct bdc *bdc;
	int ret;
	int irq;
	u32 temp;
	struct device *dev = &pdev->dev;
	int phy_num;

	dev_dbg(dev, "%s()\n", __func__);

	bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
	if (!bdc)
		return -ENOMEM;

	bdc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bdc->regs))
		return PTR_ERR(bdc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	spin_lock_init(&bdc->lock);
	platform_set_drvdata(pdev, bdc);
	bdc->irq = irq;
	bdc->dev = dev;
	dev_dbg(dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);

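	/* A missing "phys" property yields a negative count, which is treated as zero PHYs */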
	bdc->num_phys = of_count_phandle_with_args(dev->of_node,
						   "phys", "#phy-cells");
	if (bdc->num_phys > 0) {
		bdc->phys = devm_kcalloc(dev, bdc->num_phys,
					 sizeof(struct phy *), GFP_KERNEL);
		if (!bdc->phys)
			return -ENOMEM;
	} else {
		bdc->num_phys = 0;
	}
	dev_info(dev, "Using %d phy(s)\n", bdc->num_phys);

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		bdc->phys[phy_num] = devm_of_phy_get_by_index(
			dev, dev->of_node, phy_num);
		if (IS_ERR(bdc->phys[phy_num])) {
			ret = PTR_ERR(bdc->phys[phy_num]);
			dev_err(bdc->dev,
				"BDC phy specified but not found:%d\n", ret);
			return ret;
		}
	}

	bdc->clk = devm_clk_get_optional(dev, "sw_usbd");
	if (IS_ERR(bdc->clk))
		return PTR_ERR(bdc->clk);

	ret = clk_prepare_enable(bdc->clk);
	if (ret) {
		dev_err(dev, "could not enable clock\n");
		return ret;
	}

	ret = bdc_phy_init(bdc);
	if (ret) {
		dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
		goto disable_clk;
	}

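	/* Prefer 64-bit DMA when the controller advertises P64 support, else fall back to 32-bit */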
	temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
	if ((temp & BDC_P64) &&
	    !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		dev_dbg(dev, "Using 64-bit address\n");
	} else {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev,
				"No suitable DMA config available, abort\n");
			ret = -ENOTSUPP;
			goto phycleanup;
		}
		dev_dbg(dev, "Using 32-bit address\n");
	}
	ret = bdc_hw_init(bdc);
	if (ret) {
		dev_err(dev, "BDC init failure:%d\n", ret);
		goto phycleanup;
	}
	ret = bdc_udc_init(bdc);
	if (ret) {
		dev_err(dev, "BDC Gadget init failure:%d\n", ret);
		goto cleanup;
	}
	return 0;

cleanup:
	bdc_hw_exit(bdc);
phycleanup:
	bdc_phy_exit(bdc);
disable_clk:
	clk_disable_unprepare(bdc->clk);
	return ret;
}

static void bdc_remove(struct platform_device *pdev)
{
	struct bdc *bdc;

	bdc = platform_get_drvdata(pdev);
	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc_udc_exit(bdc);
	bdc_hw_exit(bdc);
	bdc_phy_exit(bdc);
	clk_disable_unprepare(bdc->clk);
}

#ifdef CONFIG_PM_SLEEP
static int bdc_suspend(struct device *dev)
{
	struct bdc *bdc = dev_get_drvdata(dev);
	int ret;

	/* Halt the controller */
	ret = bdc_stop(bdc);
	if (!ret)
		clk_disable_unprepare(bdc->clk);

	return ret;
}

static int bdc_resume(struct device *dev)
{
	struct bdc *bdc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(bdc->clk);
	if (ret) {
		dev_err(bdc->dev, "err enabling the clock\n");
		return ret;
	}
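	/* Full stop/reset/mem-init/run cycle to restore controller state lost over suspend */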
	ret = bdc_reinit(bdc);
	if (ret) {
		dev_err(bdc->dev, "err in bdc reinit\n");
		clk_disable_unprepare(bdc->clk);
		return ret;
	}

	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bdc_pm_ops, bdc_suspend,
		bdc_resume);

static const struct of_device_id bdc_of_match[] = {
	{ .compatible = "brcm,bdc-udc-v2" },
	{ .compatible = "brcm,bdc" },
	{ /* sentinel */ }
};

static struct platform_driver bdc_driver = {
	.driver		= {
		.name	= BRCM_BDC_NAME,
		.pm	= &bdc_pm_ops,
		.of_match_table	= bdc_of_match,
	},
	.probe		= bdc_probe,
	.remove_new	= bdc_remove,
};

module_platform_driver(bdc_driver);
MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(BRCM_BDC_DESC);