/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file is conditionally built on x86_64 only.  Otherwise weak symbol
 * versions of the functions exported from here are used.
 */

#include <linux/pci.h>
#include <asm/mtrr.h>
#include <asm/processor.h>

#include "qib.h"
/**
 * qib_enable_wc - enable write combining for MMIO writes to the device
 * @dd: qlogic_ib device
 *
 * This routine is x86_64-specific; it sets up write combining for the
 * chip's PIO buffers via arch_phys_wc_add(), which adds a WC MTRR when
 * PAT is not in use.
 */
int qib_enable_wc(struct qib_devdata *dd)
{
	int ret = 0;
	u64 pioaddr, piolen;
	unsigned bits;
	const unsigned long addr = pci_resource_start(dd->pcidev, 0);
	const size_t len = pci_resource_len(dd->pcidev, 0);

	/*
	 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
	 * chip.  Linux (possibly the hardware) requires the region to
	 * start on an address aligned to its length, and the length has
	 * to be a power of 2.  For rev1 that means the base address; for
	 * rev2 it will be just the PIO buffers themselves.
	 * For chips with two sets of buffers, the calculations are
	 * somewhat more complicated; we need to sum, and the piobufbase
	 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
	 * The buffers are still packed, so a single range covers both.
	 */
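	/*
	 * Illustrative example (hypothetical register value, not from any
	 * particular chip): if piobufbase were 0x0010000000020000, then
	 * pio2kbase = 0x20000 and pio4kbase = 0x100000.  Since
	 * pio2kbase < pio4kbase, the candidate WC range starts at
	 * addr + 0x20000 and spans (0x100000 - 0x20000) plus the 4K
	 * buffer area (piobcnt4k * align4k), covering both packed buffer
	 * regions in one contiguous range.
	 */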
	if (dd->piobcnt2k && dd->piobcnt4k) {
		/* 2 sizes for chip */
		unsigned long pio2kbase, pio4kbase;

		pio2kbase = dd->piobufbase & 0xffffffffUL;
		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
		if (pio2kbase < pio4kbase) {
			/* all current chips */
			pioaddr = addr + pio2kbase;
			piolen = pio4kbase - pio2kbase +
				dd->piobcnt4k * dd->align4k;
		} else {
			pioaddr = addr + pio4kbase;
			piolen = pio2kbase - pio4kbase +
				dd->piobcnt2k * dd->palign;
		}
	} else {  /* single buffer size (2K, currently) */
		pioaddr = addr + dd->piobufbase;
		piolen = dd->piobcnt2k * dd->palign +
			dd->piobcnt4k * dd->align4k;
	}

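	/* Count the trailing zero bits of piolen (index of its lowest set bit). */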
	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
		; /* do nothing */

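	/*
	 * If piolen is not already a power of 2 (more than one bit set),
	 * round it up to the next power of 2; the WC region size must be
	 * a power of 2.
	 */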
	if (piolen != (1ULL << bits)) {
		piolen >>= bits;
		while (piolen >>= 1)
			bits++;
		piolen = 1ULL << (bits + 1);
	}
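	/*
	 * The region must also start on an address aligned to its
	 * (power-of-2) size.  If it does not, fall back to aligning the
	 * start down to a piolen boundary and doubling piolen; if the
	 * aligned range would fall outside BAR 0, give up on WC.
	 */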
	if (pioaddr & (piolen - 1)) {
		u64 atmp = pioaddr & ~(piolen - 1);

		if (atmp < addr || (atmp + piolen) > (addr + len)) {
			qib_dev_err(dd,
				"No way to align address/size (%llx/%llx), no WC mtrr\n",
				(unsigned long long) atmp,
				(unsigned long long) piolen << 1);
			ret = -ENODEV;
		} else {
			pioaddr = atmp;
			piolen <<= 1;
		}
	}

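	/*
	 * arch_phys_wc_add() returns 0 when no MTRR is needed (e.g. PAT
	 * is in use), a positive cookie when a WC MTRR was added, or a
	 * negative errno on failure; the cookie is saved so
	 * qib_disable_wc() can undo it.
	 */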
	if (!ret) {
		dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
		if (dd->wc_cookie < 0)
			/* use error from routine */
			ret = dd->wc_cookie;
	}

	return ret;
}

/**
 * qib_disable_wc - disable write combining for MMIO writes to the device
 * @dd: qlogic_ib device
 */
void qib_disable_wc(struct qib_devdata *dd)
{
	arch_phys_wc_del(dd->wc_cookie);
}

/**
 * qib_unordered_wc - indicate whether write combining is unordered
 *
 * Because our performance depends on our ability to do write-combined MMIO
 * writes in the most efficient way, we need to know if we are on an Intel
 * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
 * the order completed, and so no special flushing is required to get
 * correct ordering.  Intel processors, however, will flush write buffers
 * out in "random" orders, and so explicit ordering is needed at times.
 */
int qib_unordered_wc(void)
{
	return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
}
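
/*
 * Illustrative caller sketch (a hedged example, not code from this file;
 * qib_flush_wc() is assumed to be the driver's sfence/wmb helper, and the
 * surrounding steps are pseudocode):
 *
 *	copy the payload into the write-combined PIO buffer
 *	if (qib_unordered_wc())
 *		qib_flush_wc();		push the WC buffers out before the
 *					final trigger word so the chip sees
 *					the payload in order
 *	write the final trigger word to the chip
 */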