1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_sriov.h"
23 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
24 {
25 	int idx;
26 
27 	for_each_vf(bp, idx)
28 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
29 			break;
30 	return idx;
31 }
32 
33 static
34 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
35 {
36 	u16 idx =  (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
37 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
38 }
39 
40 static int bnx2x_ari_enabled(struct pci_dev *dev)
41 {
42 	return dev->bus->self && dev->bus->self->ari_enabled;
43 }
44 
45 static void
46 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
47 {
48 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
49 	if (vf) {
50 		if (!vf_sb_count(vf))
51 			vf->igu_base_id = igu_sb_id;
52 		++vf_sb_count(vf);
53 	}
54 }
55 
56 static void
57 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
58 {
59 	int sb_id;
60 	u32 val;
61 	u8 fid;
62 
63 	/* IGU in normal mode - read CAM */
64 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
65 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
66 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
67 			continue;
68 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
69 		if (!(fid & IGU_FID_ENCODE_IS_PF))
70 			bnx2x_vf_set_igu_info(bp, sb_id,
71 					      (fid & IGU_FID_VF_NUM_MASK));
72 
73 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
74 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
75 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
76 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
77 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
78 	}
79 }
80 
81 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
82 {
83 	if (bp->vfdb) {
84 		kfree(bp->vfdb->vfqs);
85 		kfree(bp->vfdb->vfs);
86 		kfree(bp->vfdb);
87 	}
88 	bp->vfdb = NULL;
89 }
90 
91 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
92 {
93 	int pos;
94 	struct pci_dev *dev = bp->pdev;
95 
96 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
97 	if (!pos) {
98 		BNX2X_ERR("failed to find SRIOV capability in device\n");
99 		return -ENODEV;
100 	}
101 
102 	iov->pos = pos;
103 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
104 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
105 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
106 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
107 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
108 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
109 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
110 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
111 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
112 
113 	return 0;
114 }
115 
116 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
117 {
118 	u32 val;
119 
120 	/* read the SRIOV capability structure
121 	 * The fields can be read via configuration read or
122 	 * directly from the device (starting at offset PCICFG_OFFSET)
123 	 */
124 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
125 		return -ENODEV;
126 
127 	/* get the number of SRIOV bars */
128 	iov->nres = 0;
129 
130 	/* read the first_vfid */
131 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
132 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
133 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
134 
135 	DP(BNX2X_MSG_IOV,
136 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
137 	   BP_FUNC(bp),
138 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
139 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
140 
141 	return 0;
142 }
143 
144 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
145 {
146 	int i;
147 	u8 queue_count = 0;
148 
149 	if (IS_SRIOV(bp))
150 		for_each_vf(bp, i)
151 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
152 
153 	return queue_count;
154 }
155 
156 /* must be called after PF bars are mapped */
157 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
158 				 int num_vfs_param)
159 {
160 	int err, i, qcount;
161 	struct bnx2x_sriov *iov;
162 	struct pci_dev *dev = bp->pdev;
163 
164 	bp->vfdb = NULL;
165 
166 	/* verify sriov capability is present in configuration space */
167 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
168 		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
169 		return 0;
170 	}
171 
172 	/* verify is pf */
173 	if (IS_VF(bp))
174 		return 0;
175 
176 	/* verify chip revision */
177 	if (CHIP_IS_E1x(bp))
178 		return 0;
179 
180 	/* check if SRIOV support is turned off */
181 	if (!num_vfs_param)
182 		return 0;
183 
184 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
185 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
186 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
187 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
188 		return 0;
189 	}
190 
191 	/* SRIOV can be enabled only with MSIX */
192 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
193 	    int_mode_param == BNX2X_INT_MODE_INTX) {
194 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
195 		return 0;
196 	}
197 
198 	/* verify ari is enabled */
199 	if (!bnx2x_ari_enabled(bp->pdev)) {
200 		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
201 		return 0;
202 	}
203 
204 	/* verify igu is in normal mode */
205 	if (CHIP_INT_MODE_IS_BC(bp)) {
206 		BNX2X_ERR("IGU not normal mode,  SRIOV can not be enabled\n");
207 		return 0;
208 	}
209 
210 	/* allocate the vfs database */
211 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
212 	if (!bp->vfdb) {
213 		BNX2X_ERR("failed to allocate vf database\n");
214 		err = -ENOMEM;
215 		goto failed;
216 	}
217 
218 	/* get the sriov info - Linux already collected all the pertinent
219 	 * information, however the sriov structure is for the private use
220 	 * of the pci module. Also we want this information regardless
221 	 * of the hyper-visor.
222 	 */
223 	iov = &(bp->vfdb->sriov);
224 	err = bnx2x_sriov_info(bp, iov);
225 	if (err)
226 		goto failed;
227 
228 	/* SR-IOV capability was enabled but there are no VFs*/
229 	if (iov->total == 0)
230 		goto failed;
231 
232 	/* calcuate the actual number of VFs */
233 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
234 
235 	/* allcate the vf array */
236 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
237 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
238 	if (!bp->vfdb->vfs) {
239 		BNX2X_ERR("failed to allocate vf array\n");
240 		err = -ENOMEM;
241 		goto failed;
242 	}
243 
244 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
245 	for_each_vf(bp, i) {
246 		bnx2x_vf(bp, i, index) = i;
247 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
248 		bnx2x_vf(bp, i, state) = VF_FREE;
249 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
250 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
251 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
252 	}
253 
254 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
255 	bnx2x_get_vf_igu_cam_info(bp);
256 
257 	/* get the total queue count and allocate the global queue arrays */
258 	qcount = bnx2x_iov_get_max_queue_count(bp);
259 
260 	/* allocate the queue arrays for all VFs */
261 	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
262 				 GFP_KERNEL);
263 	if (!bp->vfdb->vfqs) {
264 		BNX2X_ERR("failed to allocate vf queue array\n");
265 		err = -ENOMEM;
266 		goto failed;
267 	}
268 
269 	return 0;
270 failed:
271 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
272 	__bnx2x_iov_free_vfdb(bp);
273 	return err;
274 }
275 
276 /* called by bnx2x_init_hw_func, returns the next ilt line */
277 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
278 {
279 	int i;
280 	struct bnx2x_ilt *ilt = BP_ILT(bp);
281 
282 	if (!IS_SRIOV(bp))
283 		return line;
284 
285 	/* set vfs ilt lines */
286 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
287 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
288 
289 		ilt->lines[line+i].page = hw_cxt->addr;
290 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
291 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
292 	}
293 	return line + i;
294 }
295 
/* Tear down the SR-IOV state built by bnx2x_iov_init_one() */
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* nothing was allocated unless SR-IOV was set up */
	if (IS_SRIOV(bp))
		__bnx2x_iov_free_vfdb(bp);
}
305