/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"

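/* BAR1 mapping window (index register) used by the memory ops below. */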
#define MEMOPS_IDX   MAX_BAR1_MAP_INDEX

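/* On big-endian hosts, flip the swap-mode bit (bit 1) of a BAR1 index
 * register; the fast copy routines below call this around their 64-bit
 * MMIO loops.  On little-endian hosts this is a no-op.
 */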
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 mask;

	mask = oct->fn_list.bar1_idx_read(oct, idx);
	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
	oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx) do { } while (0)
#endif

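/* Copy a host buffer into a mapped BAR1 window: byte writes until the
 * device address is 8-byte aligned, 64-bit writes for the bulk (with the
 * window's swap mode toggled on big-endian hosts), then byte writes for
 * any remaining tail.
 */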
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
		     u8 *hostbuf, u32 len)
{
	while (len && ((unsigned long)mapped_addr & 7)) {
		writeb(*(hostbuf++), mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		writeq(*((u64 *)hostbuf), mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		writeb(*(hostbuf++), mapped_addr++);
}

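/* Copy from a mapped BAR1 window into a host buffer; mirrors
 * octeon_pci_fastwrite(): byte reads up to 8-byte alignment, 64-bit reads
 * for the bulk, byte reads for the tail.
 */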
static void
octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
		    u8 *hostbuf, u32 len)
{
	while (len && ((unsigned long)mapped_addr & 7)) {
		*(hostbuf++) = readb(mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		*((u64 *)hostbuf) = readq(mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		*(hostbuf++) = readb(mapped_addr++);
}

/* Core memory read/write using a temporary BAR1 mapping.
 * op = 1 to read from the core, op = 0 to write to it.
 */
static void
__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
			 u8 *hostbuf, u32 len, u32 op)
{
	u32 copy_len = 0, index_reg_val = 0;
	unsigned long flags;
	u8 __iomem *mapped_addr;

	spin_lock_irqsave(&oct->mem_access_lock, flags);

	/* Save the original index reg value so it can be restored later. */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
		/* Point the BAR1 window at the 4MB region containing addr. */
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		mapped_addr = oct->mmio[1].hw_addr
		    + (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/* If the operation crosses a 4MB boundary, split the
		 * transfer at that boundary: copy only up to the end of
		 * the current 4MB window in this pass.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			copy_len = (u32)(((addr & ~(0x3fffff)) +
				   (1 << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) {	/* read from core */
			octeon_pci_fastread(oct, mapped_addr, hostbuf,
					    copy_len);
		} else {	/* write to core */
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
					     copy_len);
		}

		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;

	} while (len);

	/* Restore the saved BAR1 index register value. */
	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
}

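/**
 * octeon_pci_read_core_mem - read a block of Octeon core memory
 * @oct: octeon device
 * @coreaddr: source address in core memory
 * @buf: destination host buffer
 * @len: number of bytes to read
 */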
void
octeon_pci_read_core_mem(struct octeon_device *oct,
			 u64 coreaddr,
			 u8 *buf,
			 u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}

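/**
 * octeon_pci_write_core_mem - write a host buffer to Octeon core memory
 * @oct: octeon device
 * @coreaddr: destination address in core memory
 * @buf: source host buffer
 * @len: number of bytes to write
 */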
void
octeon_pci_write_core_mem(struct octeon_device *oct,
			  u64 coreaddr,
			  u8 *buf,
			  u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
}

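/**
 * octeon_read_device_mem64 - read a 64-bit value from Octeon core memory
 * @oct: octeon device
 * @coreaddr: address to read from
 *
 * Return: the big-endian device value converted to host byte order.
 */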
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
{
	__be64 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);

	return be64_to_cpu(ret);
}

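/**
 * octeon_read_device_mem32 - read a 32-bit value from Octeon core memory
 * @oct: octeon device
 * @coreaddr: address to read from
 *
 * Return: the big-endian device value converted to host byte order.
 */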
u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
{
	__be32 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);

	return be32_to_cpu(ret);
}

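/**
 * octeon_write_device_mem32 - write a 32-bit value to Octeon core memory
 * @oct: octeon device
 * @coreaddr: address to write to
 * @val: value to write; stored on the device in big-endian byte order
 */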
void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
			       u32 val)
{
	__be32 t = cpu_to_be32(val);

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
}