xref: /openbmc/linux/arch/alpha/kernel/pci-noop.c (revision a1e58bbd)
1 /*
2  *	linux/arch/alpha/kernel/pci-noop.c
3  *
4  * Stub PCI interfaces for Jensen-specific kernels.
5  */
6 
7 #include <linux/pci.h>
8 #include <linux/init.h>
9 #include <linux/bootmem.h>
10 #include <linux/capability.h>
11 #include <linux/mm.h>
12 #include <linux/errno.h>
13 #include <linux/sched.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/scatterlist.h>
16 
17 #include "proto.h"
18 
19 
20 /*
21  * The PCI controller list.
22  */
23 
/* Head of the hose list; hose_tail always points at the terminating
   next-pointer so new hoses can be appended in O(1).  */
struct pci_controller *hose_head, **hose_tail = &hose_head;
/* Hose used to service ISA-space accesses (the bus==0/dfn==0 hook in
   sys_pciconfig_iobase).  NOTE(review): never assigned in this file —
   presumably set up by platform init code; verify before relying on it.  */
struct pci_controller *pci_isa_hose;
26 
27 
28 struct pci_controller * __init
29 alloc_pci_controller(void)
30 {
31 	struct pci_controller *hose;
32 
33 	hose = alloc_bootmem(sizeof(*hose));
34 
35 	*hose_tail = hose;
36 	hose_tail = &hose->next;
37 
38 	return hose;
39 }
40 
41 struct resource * __init
42 alloc_resource(void)
43 {
44 	struct resource *res;
45 
46 	res = alloc_bootmem(sizeof(*res));
47 
48 	return res;
49 }
50 
51 asmlinkage long
52 sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
53 {
54 	struct pci_controller *hose;
55 
56 	/* from hose or from bus.devfn */
57 	if (which & IOBASE_FROM_HOSE) {
58 		for (hose = hose_head; hose; hose = hose->next)
59 			if (hose->index == bus)
60 				break;
61 		if (!hose)
62 			return -ENODEV;
63 	} else {
64 		/* Special hook for ISA access.  */
65 		if (bus == 0 && dfn == 0)
66 			hose = pci_isa_hose;
67 		else
68 			return -ENODEV;
69 	}
70 
71 	switch (which & ~IOBASE_FROM_HOSE) {
72 	case IOBASE_HOSE:
73 		return hose->index;
74 	case IOBASE_SPARSE_MEM:
75 		return hose->sparse_mem_base;
76 	case IOBASE_DENSE_MEM:
77 		return hose->dense_mem_base;
78 	case IOBASE_SPARSE_IO:
79 		return hose->sparse_io_base;
80 	case IOBASE_DENSE_IO:
81 		return hose->dense_io_base;
82 	case IOBASE_ROOT_BUS:
83 		return hose->bus->number;
84 	}
85 
86 	return -EOPNOTSUPP;
87 }
88 
89 asmlinkage long
90 sys_pciconfig_read(unsigned long bus, unsigned long dfn,
91 		   unsigned long off, unsigned long len, void *buf)
92 {
93 	if (!capable(CAP_SYS_ADMIN))
94 		return -EPERM;
95 	else
96 		return -ENODEV;
97 }
98 
99 asmlinkage long
100 sys_pciconfig_write(unsigned long bus, unsigned long dfn,
101 		    unsigned long off, unsigned long len, void *buf)
102 {
103 	if (!capable(CAP_SYS_ADMIN))
104 		return -EPERM;
105 	else
106 		return -ENODEV;
107 }
108 
109 /* Stubs for the routines in pci_iommu.c: */
110 
111 void *
112 pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
113 {
114 	return NULL;
115 }
116 
/*
 * Stub for pci_iommu.c: pci_alloc_consistent() above always returns
 * NULL, so there is never anything to free.
 */
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
}
122 
123 dma_addr_t
124 pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
125 	       int direction)
126 {
127 	return (dma_addr_t) 0;
128 }
129 
/*
 * Stub for pci_iommu.c: pci_map_single() above never maps anything,
 * so there is nothing to undo.
 */
void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
}
135 
/*
 * Stub for pci_iommu.c: no scatter-gather entries are ever mapped.
 */
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	int mapped = 0;

	return mapped;
}
142 
/*
 * Stub for pci_iommu.c: pci_map_sg() above maps nothing, so there is
 * nothing to unmap.
 */
void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
}
148 
149 int
150 pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
151 {
152 	return 0;
153 }
154 
155 /* Generic DMA mapping functions: */
156 
157 void *
158 dma_alloc_coherent(struct device *dev, size_t size,
159 		   dma_addr_t *dma_handle, gfp_t gfp)
160 {
161 	void *ret;
162 
163 	if (!dev || *dev->dma_mask >= 0xffffffffUL)
164 		gfp &= ~GFP_DMA;
165 	ret = (void *)__get_free_pages(gfp, get_order(size));
166 	if (ret) {
167 		memset(ret, 0, size);
168 		*dma_handle = virt_to_phys(ret);
169 	}
170 	return ret;
171 }
172 
173 EXPORT_SYMBOL(dma_alloc_coherent);
174 
175 int
176 dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
177 	   enum dma_data_direction direction)
178 {
179 	int i;
180 	struct scatterlist *sg;
181 
182 	for_each_sg(sgl, sg, nents, i) {
183 		void *va;
184 
185 		BUG_ON(!sg_page(sg));
186 		va = sg_virt(sg);
187 		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
188 		sg_dma_len(sg) = sg->length;
189 	}
190 
191 	return nents;
192 }
193 
194 EXPORT_SYMBOL(dma_map_sg);
195 
196 int
197 dma_set_mask(struct device *dev, u64 mask)
198 {
199 	if (!dev->dma_mask || !dma_supported(dev, mask))
200 		return -EIO;
201 
202 	*dev->dma_mask = mask;
203 
204 	return 0;
205 }
206 EXPORT_SYMBOL(dma_set_mask);
207 
208 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
209 {
210 	return NULL;
211 }
212 
/*
 * Stub: pci_iomap() above never maps anything, so there is nothing
 * to unmap.
 */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
219