1 // SPDX-License-Identifier: GPL-2.0
2 // PCI1xxxx SPI driver
3 // Copyright (C) 2022 Microchip Technology Inc.
4 // Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
5 // Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
6
7
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 #include <linux/spi/spi.h>
11 #include <linux/delay.h>
12
13 #define DRV_NAME "spi-pci1xxxx"
14
15 #define SYS_FREQ_DEFAULT (62500000)
16
17 #define PCI1XXXX_SPI_MAX_CLOCK_HZ (30000000)
18 #define PCI1XXXX_SPI_CLK_20MHZ (20000000)
19 #define PCI1XXXX_SPI_CLK_15MHZ (15000000)
20 #define PCI1XXXX_SPI_CLK_12MHZ (12000000)
21 #define PCI1XXXX_SPI_CLK_10MHZ (10000000)
22 #define PCI1XXXX_SPI_MIN_CLOCK_HZ (2000000)
23
24 #define PCI1XXXX_SPI_BUFFER_SIZE (320)
25
26 #define SPI_MST_CTL_DEVSEL_MASK (GENMASK(27, 25))
27 #define SPI_MST_CTL_CMD_LEN_MASK (GENMASK(16, 8))
28 #define SPI_MST_CTL_SPEED_MASK (GENMASK(7, 5))
29 #define SPI_MSI_VECTOR_SEL_MASK (GENMASK(4, 4))
30
31 #define SPI_MST_CTL_FORCE_CE (BIT(4))
32 #define SPI_MST_CTL_MODE_SEL (BIT(2))
33 #define SPI_MST_CTL_GO (BIT(0))
34
35 #define SPI_MST1_ADDR_BASE (0x800)
36
37 /* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
38
39 #define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
40 #define SPI_MST_RSP_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x200)
41 #define SPI_MST_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x400)
42 #define SPI_MST_EVENT_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x420)
43 #define SPI_MST_EVENT_MASK_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x424)
44 #define SPI_MST_PAD_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x460)
45 #define SPIALERT_MST_DB_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x464)
46 #define SPIALERT_MST_VAL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x468)
47 #define SPI_PCI_CTRL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x480)
48
49 #define PCI1XXXX_IRQ_FLAGS (IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
50 #define SPI_MAX_DATA_LEN 320
51
52 #define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))
53
54 #define SPI_INTR BIT(8)
55 #define SPI_FORCE_CE BIT(4)
56
57 #define SPI_CHIP_SEL_COUNT 7
58 #define VENDOR_ID_MCHP 0x1055
59
60 #define SPI_SUSPEND_CONFIG 0x101
61 #define SPI_RESUME_CONFIG 0x203
62
/* Per-HW-instance state: one of these per SPI host controller instance. */
struct pci1xxxx_spi_internal {
	u8 hw_inst;			/* HW instance id; selects the register bank offset */
	bool spi_xfer_in_progress;	/* set around transfer_one(); polled by suspend */
	int irq;			/* MSI vector assigned to this instance */
	struct completion spi_xfer_done;	/* completed by the ISR on SPI_INTR */
	struct spi_controller *spi_host;
	struct pci1xxxx_spi *parent;	/* back-pointer to the shared PCI-level state */
	struct {
		/* Saved at suspend, rewritten at resume (see store_restore_config) */
		unsigned int dev_sel : 3;		/* SPI_MST_CTL DEV_SEL bits 27:25 */
		unsigned int msi_vector_sel : 1;	/* SPI_PCI_CTRL bit 4 */
	} prev_val;
};
75
/* PCI-function-level state shared by all SPI host controller instances. */
struct pci1xxxx_spi {
	struct pci_dev *dev;
	u8 total_hw_instances;		/* number of entries in spi_int[] */
	void __iomem *reg_base;		/* BAR 0 mapping; all register offsets are relative to this */
	struct pci1xxxx_spi_internal *spi_int[];	/* flexible array, sized at probe */
};
82
/*
 * driver_data encoding (decoded in probe): low nibble = number of HW
 * instances to register, high nibble = first instance id. So 0x02 means
 * two instances starting at 0, 0x01 one instance starting at 0, and 0x11
 * only the second instance (start = 1).
 */
static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
108
/*
 * Drive the chip-select for @spi. When deasserting (enable == false) the
 * requested chip select is programmed into DEV_SEL (bits 27:25) and the
 * FORCE_CE bit is set; asserting simply clears FORCE_CE.
 */
static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
	struct pci1xxxx_spi *par = p->parent;
	void __iomem *ctl_reg = par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst);
	u32 ctl;

	ctl = readl(ctl_reg);
	if (enable) {
		ctl &= ~SPI_FORCE_CE;
	} else {
		/* Select the target device, then force the chip-enable */
		ctl &= ~SPI_MST_CTL_DEVSEL_MASK;
		ctl |= (spi_get_chipselect(spi, 0) << 25);
		ctl |= SPI_FORCE_CE;
	}
	writel(ctl, ctl_reg);
}
126
pci1xxxx_get_clock_div(u32 hz)127 static u8 pci1xxxx_get_clock_div(u32 hz)
128 {
129 u8 val = 0;
130
131 if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
132 val = 2;
133 else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
134 val = 3;
135 else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
136 val = 4;
137 else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
138 val = 5;
139 else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
140 val = 6;
141 else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
142 val = 7;
143 else
144 val = 2;
145
146 return val;
147 }
148
pci1xxxx_spi_transfer_one(struct spi_controller * spi_ctlr,struct spi_device * spi,struct spi_transfer * xfer)149 static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
150 struct spi_device *spi, struct spi_transfer *xfer)
151 {
152 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
153 int mode, len, loop_iter, transfer_len;
154 struct pci1xxxx_spi *par = p->parent;
155 unsigned long bytes_transfered;
156 unsigned long bytes_recvd;
157 unsigned long loop_count;
158 u8 *rx_buf, result;
159 const u8 *tx_buf;
160 u32 regval;
161 u8 clkdiv;
162
163 p->spi_xfer_in_progress = true;
164 mode = spi->mode;
165 clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
166 tx_buf = xfer->tx_buf;
167 rx_buf = xfer->rx_buf;
168 transfer_len = xfer->len;
169 regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
170 writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
171
172 if (tx_buf) {
173 bytes_transfered = 0;
174 bytes_recvd = 0;
175 loop_count = transfer_len / SPI_MAX_DATA_LEN;
176 if (transfer_len % SPI_MAX_DATA_LEN != 0)
177 loop_count += 1;
178
179 for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
180 len = SPI_MAX_DATA_LEN;
181 if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
182 (loop_iter == loop_count - 1))
183 len = transfer_len % SPI_MAX_DATA_LEN;
184
185 reinit_completion(&p->spi_xfer_done);
186 memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
187 &tx_buf[bytes_transfered], len);
188 bytes_transfered += len;
189 regval = readl(par->reg_base +
190 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
191 regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
192 SPI_MST_CTL_SPEED_MASK);
193
194 if (mode == SPI_MODE_3)
195 regval |= SPI_MST_CTL_MODE_SEL;
196 else
197 regval &= ~SPI_MST_CTL_MODE_SEL;
198
199 regval |= (clkdiv << 5);
200 regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
201 regval |= (len << 8);
202 writel(regval, par->reg_base +
203 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
204 regval = readl(par->reg_base +
205 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
206 regval |= SPI_MST_CTL_GO;
207 writel(regval, par->reg_base +
208 SPI_MST_CTL_REG_OFFSET(p->hw_inst));
209
210 /* Wait for DMA_TERM interrupt */
211 result = wait_for_completion_timeout(&p->spi_xfer_done,
212 PCI1XXXX_SPI_TIMEOUT);
213 if (!result)
214 return -ETIMEDOUT;
215
216 if (rx_buf) {
217 memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
218 SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
219 bytes_recvd += len;
220 }
221 }
222 }
223 p->spi_xfer_in_progress = false;
224
225 return 0;
226 }
227
pci1xxxx_spi_isr(int irq,void * dev)228 static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
229 {
230 struct pci1xxxx_spi_internal *p = dev;
231 irqreturn_t spi_int_fired = IRQ_NONE;
232 u32 regval;
233
234 /* Clear the SPI GO_BIT Interrupt */
235 regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
236 if (regval & SPI_INTR) {
237 /* Clear xfer_done */
238 complete(&p->spi_xfer_done);
239 spi_int_fired = IRQ_HANDLED;
240 }
241
242 writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
243
244 return spi_int_fired;
245 }
246
pci1xxxx_spi_probe(struct pci_dev * pdev,const struct pci_device_id * ent)247 static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
248 {
249 u8 hw_inst_cnt, iter, start, only_sec_inst;
250 struct pci1xxxx_spi_internal *spi_sub_ptr;
251 struct device *dev = &pdev->dev;
252 struct pci1xxxx_spi *spi_bus;
253 struct spi_controller *spi_host;
254 u32 regval;
255 int ret;
256
257 hw_inst_cnt = ent->driver_data & 0x0f;
258 start = (ent->driver_data & 0xf0) >> 4;
259 if (start == 1)
260 only_sec_inst = 1;
261 else
262 only_sec_inst = 0;
263
264 spi_bus = devm_kzalloc(&pdev->dev,
265 struct_size(spi_bus, spi_int, hw_inst_cnt),
266 GFP_KERNEL);
267 if (!spi_bus)
268 return -ENOMEM;
269
270 spi_bus->dev = pdev;
271 spi_bus->total_hw_instances = hw_inst_cnt;
272 pci_set_master(pdev);
273
274 for (iter = 0; iter < hw_inst_cnt; iter++) {
275 spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
276 sizeof(struct pci1xxxx_spi_internal),
277 GFP_KERNEL);
278 if (!spi_bus->spi_int[iter])
279 return -ENOMEM;
280 spi_sub_ptr = spi_bus->spi_int[iter];
281 spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
282 if (!spi_sub_ptr->spi_host)
283 return -ENOMEM;
284
285 spi_sub_ptr->parent = spi_bus;
286 spi_sub_ptr->spi_xfer_in_progress = false;
287
288 if (!iter) {
289 ret = pcim_enable_device(pdev);
290 if (ret)
291 return -ENOMEM;
292
293 ret = pci_request_regions(pdev, DRV_NAME);
294 if (ret)
295 return -ENOMEM;
296
297 spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
298 if (!spi_bus->reg_base) {
299 ret = -EINVAL;
300 goto error;
301 }
302
303 ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
304 PCI_IRQ_ALL_TYPES);
305 if (ret < 0) {
306 dev_err(&pdev->dev, "Error allocating MSI vectors\n");
307 goto error;
308 }
309
310 init_completion(&spi_sub_ptr->spi_xfer_done);
311 /* Initialize Interrupts - SPI_INT */
312 regval = readl(spi_bus->reg_base +
313 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
314 regval &= ~SPI_INTR;
315 writel(regval, spi_bus->reg_base +
316 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
317 spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
318
319 ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
320 pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
321 pci_name(pdev), spi_sub_ptr);
322 if (ret < 0) {
323 dev_err(&pdev->dev, "Unable to request irq : %d",
324 spi_sub_ptr->irq);
325 ret = -ENODEV;
326 goto error;
327 }
328
329 /* This register is only applicable for 1st instance */
330 regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
331 if (!only_sec_inst)
332 regval |= (BIT(4));
333 else
334 regval &= ~(BIT(4));
335
336 writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
337 }
338
339 spi_sub_ptr->hw_inst = start++;
340
341 if (iter == 1) {
342 init_completion(&spi_sub_ptr->spi_xfer_done);
343 /* Initialize Interrupts - SPI_INT */
344 regval = readl(spi_bus->reg_base +
345 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
346 regval &= ~SPI_INTR;
347 writel(regval, spi_bus->reg_base +
348 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
349 spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
350 ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
351 pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
352 pci_name(pdev), spi_sub_ptr);
353 if (ret < 0) {
354 dev_err(&pdev->dev, "Unable to request irq : %d",
355 spi_sub_ptr->irq);
356 ret = -ENODEV;
357 goto error;
358 }
359 }
360
361 spi_host = spi_sub_ptr->spi_host;
362 spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
363 spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
364 SPI_TX_DUAL | SPI_LOOP;
365 spi_host->transfer_one = pci1xxxx_spi_transfer_one;
366 spi_host->set_cs = pci1xxxx_spi_set_cs;
367 spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
368 spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
369 spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
370 spi_host->flags = SPI_CONTROLLER_MUST_TX;
371 spi_controller_set_devdata(spi_host, spi_sub_ptr);
372 ret = devm_spi_register_controller(dev, spi_host);
373 if (ret)
374 goto error;
375 }
376 pci_set_drvdata(pdev, spi_bus);
377
378 return 0;
379
380 error:
381 pci_release_regions(pdev);
382 return ret;
383 }
384
/*
 * Save (store == true) or reapply (store == false) the DEV_SEL field of
 * SPI_MST_CTL and the MSI vector-select bit of SPI_PCI_CTRL across a
 * suspend/resume cycle. The store path reads the instance indicated by
 * @spi_sub_ptr->hw_inst; the restore path writes the registers of @inst.
 */
static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
				 struct pci1xxxx_spi_internal *spi_sub_ptr,
				 u8 inst, bool store)
{
	u32 val;

	if (!store) {
		/* Restore: rewrite DEV_SEL, then the MSI vector select bit */
		val = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
		val &= ~SPI_MST_CTL_DEVSEL_MASK;
		val |= (spi_sub_ptr->prev_val.dev_sel << 25);
		writel(val,
		       spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
		writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
		       spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
		return;
	}

	/* Store: latch the current DEV_SEL and MSI vector select */
	val = readl(spi_ptr->reg_base +
		    SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
	spi_sub_ptr->prev_val.dev_sel = ((val & SPI_MST_CTL_DEVSEL_MASK) >> 25) & 7;
	val = readl(spi_ptr->reg_base +
		    SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
	spi_sub_ptr->prev_val.msi_vector_sel = ((val & SPI_MSI_VECTOR_SEL_MASK) >> 4) & 1;
}
410
pci1xxxx_spi_resume(struct device * dev)411 static int pci1xxxx_spi_resume(struct device *dev)
412 {
413 struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
414 struct pci1xxxx_spi_internal *spi_sub_ptr;
415 u32 regval = SPI_RESUME_CONFIG;
416 u8 iter;
417
418 for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
419 spi_sub_ptr = spi_ptr->spi_int[iter];
420 spi_controller_resume(spi_sub_ptr->spi_host);
421 writel(regval, spi_ptr->reg_base +
422 SPI_MST_EVENT_MASK_REG_OFFSET(iter));
423
424 /* Restore config at resume */
425 store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
426 }
427
428 return 0;
429 }
430
pci1xxxx_spi_suspend(struct device * dev)431 static int pci1xxxx_spi_suspend(struct device *dev)
432 {
433 struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
434 struct pci1xxxx_spi_internal *spi_sub_ptr;
435 u32 reg1 = SPI_SUSPEND_CONFIG;
436 u8 iter;
437
438 for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
439 spi_sub_ptr = spi_ptr->spi_int[iter];
440
441 while (spi_sub_ptr->spi_xfer_in_progress)
442 msleep(20);
443
444 /* Store existing config before suspend */
445 store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
446 spi_controller_suspend(spi_sub_ptr->spi_host);
447 writel(reg1, spi_ptr->reg_base +
448 SPI_MST_EVENT_MASK_REG_OFFSET(iter));
449 }
450
451 return 0;
452 }
453
/* Sleep-only PM ops; compiled out when CONFIG_PM_SLEEP is disabled */
static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
				pci1xxxx_spi_resume);

static struct pci_driver pci1xxxx_spi_driver = {
	.name		= DRV_NAME,
	.id_table	= pci1xxxx_spi_pci_id_table,
	.probe		= pci1xxxx_spi_probe,
	.driver		=	{
		.pm = pm_sleep_ptr(&spi_pm_ops),
	},
};

module_pci_driver(pci1xxxx_spi_driver);
467
468 MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
469 MODULE_AUTHOR("Tharun Kumar P<tharunkumar.pasumarthi@microchip.com>");
470 MODULE_AUTHOR("Kumaravel Thiagarajan<kumaravel.thiagarajan@microchip.com>");
471 MODULE_LICENSE("GPL v2");
472