// SPDX-License-Identifier: GPL-2.0-only

/*
 * Linux device driver for PCI based Prism54
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
 *
 * Based on the islsm (softmac prism54) driver, which is:
 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"
#include "p54pci.h"

MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");

static const struct pci_device_id p54p_table[] = {
	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3890) },
	/* 3COM 3CRWE154G72 Wireless LAN adapter */
	{ PCI_DEVICE(0x10b7, 0x6001) },
	/* Intersil PRISM Indigo Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3877) },
	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3886) },
	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
	{ PCI_DEVICE(0x1260, 0xffff) },
	{ },
};

MODULE_DEVICE_TABLE(pci, p54p_table);

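/*
 * Reset the device, copy the firmware image into device memory through
 * the 4 KiB direct memory window and restart the device in RAMBOOT mode
 * so it executes the freshly uploaded image.
 */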
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	__le32 reg;
	int err;
	__le32 *data;
	u32 remains, left, device_addr;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();

	/* wait for the firmware to reset properly */
	mdelay(10);

	err = p54_parse_firmware(dev, priv->firmware);
	if (err)
		return err;

	if (priv->common.fw_interface != FW_LM86) {
		dev_err(&priv->pdev->dev, "wrong firmware, "
			"please get an LM86(PCI) firmware and try again.\n");
		return -EINVAL;
	}

	data = (__le32 *) priv->firmware->data;
	remains = priv->firmware->size;
	device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
	while (remains) {
		u32 i = 0;
		left = min((u32)0x1000, remains);
		P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
		P54P_READ(int_enable);

		device_addr += 0x1000;
		while (i < left) {
			P54P_WRITE(direct_mem_win[i], *data++);
			i += sizeof(u32);
		}

		remains -= left;
		P54P_READ(int_enable);
	}

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	/* wait for the firmware to boot properly */
	mdelay(100);

	return 0;
}

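/*
 * Top up an RX ring with freshly allocated, DMA-mapped skbs and advance
 * the host index so the device can start filling the new buffers.
 */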
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = dma_map_single(&priv->pdev->dev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 DMA_FROM_DEVICE);

			if (dma_mapping_error(&priv->pdev->dev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}

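/*
 * Walk an RX ring from the last processed slot up to the device index,
 * hand completed frames to the p54 core and recycle or replace the
 * buffers afterwards.
 */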
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr,
					priv->common.rx_mtu + 32,
					DMA_FROM_DEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			dma_unmap_single(&priv->pdev->dev, dma_addr,
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			dma_sync_single_for_device(&priv->pdev->dev, dma_addr,
						   priv->common.rx_mtu + 32,
						   DMA_FROM_DEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}

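/*
 * Reclaim TX descriptors the device has finished with: unmap the DMA
 * buffers, clear the descriptors and free any skbs the p54 core no
 * longer needs.
 */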
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **tx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	struct sk_buff *skb;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		desc = &ring[i];

		skb = tx_buf[i];
		tx_buf[i] = NULL;

		dma_unmap_single(&priv->pdev->dev,
				 le32_to_cpu(desc->host_addr),
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);

		desc->host_addr = 0;
		desc->device_addr = 0;
		desc->len = 0;
		desc->flags = 0;

		if (skb && FREE_AFTER_TX(skb))
			p54_free_skb(dev, skb);

		i++;
		i %= ring_limit;
	}
}

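/*
 * Bottom half of the interrupt handler: service all four rings
 * (management and data, TX and RX), then tell the device that the ring
 * control block has been updated.
 */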
static void p54p_tasklet(unsigned long dev_id)
{
	struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;

	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
			   ARRAY_SIZE(ring_control->tx_mgmt),
			   priv->tx_buf_mgmt);

	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
			   ARRAY_SIZE(ring_control->tx_data),
			   priv->tx_buf_data);

	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
		ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);

	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
		ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);

	wmb();
	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}

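/*
 * Interrupt handler: acknowledge the interrupt, defer ring processing
 * to the tasklet and signal boot completion once the firmware reports
 * that it has initialized.
 */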
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct p54p_priv *priv = dev->priv;
	__le32 reg;

	reg = P54P_READ(int_ident);
	if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
		goto out;
	}
	P54P_WRITE(int_ack, reg);

	reg &= P54P_READ(int_enable);

	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
		tasklet_schedule(&priv->tasklet);
	else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
		complete(&priv->boot_comp);

out:
	return reg ? IRQ_HANDLED : IRQ_NONE;
}

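/*
 * Queue a frame on the data TX ring: map the skb for DMA, fill in the
 * next descriptor, bump the host index and kick the device.
 */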
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 idx, i;

	spin_lock_irqsave(&priv->lock, flags);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);

	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, mapping)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
}

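/*
 * Shut the device down: mask interrupts, release the IRQ and tasklet,
 * reset the device and unmap/free every buffer still left in the rings.
 */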
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}

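/*
 * Bring the device up: install the IRQ handler, upload the firmware,
 * prime the RX rings, hand the ring control block to the device and
 * wait up to one second for the firmware to signal that it has booted.
 */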
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;
	long timeout;

	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);
	wmb();
	udelay(10);

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	timeout = wait_for_completion_interruptible_timeout(
			&priv->boot_comp, HZ);
	if (timeout <= 0) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return timeout ? -ERESTARTSYS : -ETIMEDOUT;
	}

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}

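/*
 * Completion callback for request_firmware_nowait(): with the firmware
 * in hand, boot the device once to read the EEPROM and then register
 * with mac80211. On failure the driver is unbound from the device,
 * which indirectly invokes p54p_remove().
 */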
static void p54p_firmware_step2(const struct firmware *fw,
				void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:
	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother freeing any allocated
		 * resources here.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	pci_dev_put(pdev);
}

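/*
 * PCI probe: enable and map the device, allocate the DMA-coherent ring
 * control block and kick off the asynchronous firmware request; the
 * remaining setup continues in p54p_firmware_step2().
 */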
static int p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		goto err_put;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = dma_alloc_coherent(&pdev->dev,
						sizeof(*priv->ring_control),
						&priv->ring_control_dma, GFP_KERNEL);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);

err_iounmap:
	iounmap(priv->map);

err_free_dev:
	p54_free_common(dev);

err_free_reg:
	pci_release_regions(pdev);
err_disable_dev:
	pci_disable_device(pdev);
err_put:
	pci_dev_put(pdev);
	return err;
}

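/*
 * PCI remove: wait for the asynchronous firmware load to finish, then
 * tear down everything set up in p54p_probe().
 */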
static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}

#ifdef CONFIG_PM_SLEEP
static int p54p_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);
	return 0;
}

static int p54p_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	int err;

	err = pci_reenable_device(pdev);
	if (err)
		return err;
	return pci_set_power_state(pdev, PCI_D0);
}

static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);

#define P54P_PM_OPS (&p54pci_pm_ops)
#else
#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver p54p_driver = {
	.name		= "p54pci",
	.id_table	= p54p_table,
	.probe		= p54p_probe,
	.remove		= p54p_remove,
	.driver.pm	= P54P_PM_OPS,
};

module_pci_driver(p54p_driver);