/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"

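/* Use the BAR1 mapping window at index MAX_BAR1_MAP_INDEX for the core
 * memory accesses implemented in this file.
 */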
#define MEMOPS_IDX   MAX_BAR1_MAP_INDEX

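/* On big-endian hosts, flip the swap-mode bit (0x2) in the BAR1 index
 * register around the 64-bit copy loops below; on little-endian hosts
 * this is a no-op.
 */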
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 mask;

	mask = oct->fn_list.bar1_idx_read(oct, idx);
	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
	oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx) do { } while (0)
#endif

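/* Copy 'len' bytes from the host buffer to a BAR1-mapped device address:
 * byte writes until the device address is 8-byte aligned, 64-bit writes
 * for the bulk of the transfer, then byte writes for any remaining tail.
 */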
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
		     u8 *hostbuf, u32 len)
{
	while (len && ((unsigned long)mapped_addr & 7)) {
		writeb(*(hostbuf++), mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		writeq(*((u64 *)hostbuf), mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		writeb(*(hostbuf++), mapped_addr++);
}

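/* Copy 'len' bytes from a BAR1-mapped device address into the host buffer,
 * using the same head/body/tail split as octeon_pci_fastwrite().
 */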
static void
octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
		    u8 *hostbuf, u32 len)
{
	while (len && ((unsigned long)mapped_addr & 7)) {
		*(hostbuf++) = readb(mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		*((u64 *)hostbuf) = readq(mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		*(hostbuf++) = readb(mapped_addr++);
}

/* Read from or write to Octeon core memory through a temporarily
 * repurposed BAR1 mapping (index MEMOPS_IDX).
 * op = 1 reads from the core, op = 0 writes to it.
 */
static void
__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
			 u8 *hostbuf, u32 len, u32 op)
{
	u32 copy_len = 0, index_reg_val = 0;
	unsigned long flags;
	u8 __iomem *mapped_addr;

	spin_lock_irqsave(&oct->mem_access_lock, flags);

	/* Save the original index reg value. */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
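		/* Point the MEMOPS_IDX BAR1 window at the 4 MB region that
		 * contains 'addr' and compute the corresponding host-side
		 * address within that window.
		 */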
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		mapped_addr = oct->mmio[1].hw_addr
		    + (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/* If the operation crosses a 4 MB boundary, split the
		 * transfer at the 4 MB boundary.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			copy_len = (u32)(((addr & ~(0x3fffff)) +
				   (1 << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) {	/* read from core */
			octeon_pci_fastread(oct, mapped_addr, hostbuf,
					    copy_len);
		} else {
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
					     copy_len);
		}

		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;

	} while (len);

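	/* Restore the BAR1 index register to its saved value. */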
	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
}

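/* Read 'len' bytes of Octeon core memory starting at 'coreaddr' into the
 * host buffer 'buf'.
 */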
void
octeon_pci_read_core_mem(struct octeon_device *oct,
			 u64 coreaddr,
			 u8 *buf,
			 u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}

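/* Write 'len' bytes from the host buffer 'buf' into Octeon core memory
 * starting at 'coreaddr'.
 */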
void
octeon_pci_write_core_mem(struct octeon_device *oct,
			  u64 coreaddr,
			  u8 *buf,
			  u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
}

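/* Read a 64-bit value from Octeon core memory and convert it from the
 * core's big-endian byte order to host byte order.
 */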
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
{
	__be64 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);

	return be64_to_cpu(ret);
}

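/* Read a 32-bit value from Octeon core memory and convert it from the
 * core's big-endian byte order to host byte order.
 */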
u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
{
	__be32 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);

	return be32_to_cpu(ret);
}

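/* Convert a 32-bit value to the core's big-endian byte order and write it
 * to Octeon core memory at 'coreaddr'.
 */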
void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
			       u32 val)
{
	__be32 t = cpu_to_be32(val);

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
}