1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "priv.h"
25 
26 #include <core/firmware.h>
27 #include <subdev/timer.h>
28 
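/* Report whether fan control is (or will be) handled by PMU firmware,
 * in which case the driver must not attempt SW fan control itself.
 */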
29 bool
30 nvkm_pmu_fan_controlled(struct nvkm_device *device)
31 {
32 	struct nvkm_pmu *pmu = device->pmu;
33 
	/* Internal (driver-loaded) PMU FW does not currently control fans
	 * in any way; allow SW control of fans instead.
	 */
37 	if (pmu && pmu->func->code.size)
38 		return false;
39 
	/* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi
	 * and newer automatically controls the fan speed, which would
	 * interfere with SW control.
	 */
44 	return (device->chipset >= 0xc0);
45 }
46 
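/* Forward a PGOB enable/disable request to the chip-specific hook, if
 * one exists.  Safe to call even when no PMU subdev is present.
 */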
47 void
48 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
49 {
50 	if (pmu && pmu->func->pgob)
51 		pmu->func->pgob(pmu, enable);
52 }
53 
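/* Workqueue handler: process messages pending from the PMU via the
 * chip-/firmware-specific receive implementation.
 */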
54 static void
55 nvkm_pmu_recv(struct work_struct *work)
56 {
57 	struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
	pmu->func->recv(pmu);
59 }
60 
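/* Send a message to a process running on the PMU, optionally filling
 * in a two-word reply.  Returns -ENODEV when there is no PMU, or its
 * firmware interface has no send implementation.
 */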
61 int
62 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
63 	      u32 process, u32 message, u32 data0, u32 data1)
64 {
65 	if (!pmu || !pmu->func->send)
66 		return -ENODEV;
67 	return pmu->func->send(pmu, reply, process, message, data0, data1);
68 }
69 
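/* Top-level interrupt handler; dispatch to the chip-specific one. */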
70 static void
71 nvkm_pmu_intr(struct nvkm_subdev *subdev)
72 {
73 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
74 	if (!pmu->func->intr)
75 		return;
76 	pmu->func->intr(pmu);
77 }
78 
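/* Suspend/fini: stop chip-specific operation, make sure no receive
 * work is still in flight, and tear down the command queues so a
 * later init starts from a clean state.
 */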
79 static int
80 nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
81 {
82 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
83 
84 	if (pmu->func->fini)
85 		pmu->func->fini(pmu);
86 
87 	flush_work(&pmu->recv.work);
88 
89 	reinit_completion(&pmu->wpr_ready);
90 
91 	nvkm_falcon_cmdq_fini(pmu->lpq);
92 	nvkm_falcon_cmdq_fini(pmu->hpq);
93 	pmu->initmsg_received = false;
94 	return 0;
95 }
96 
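/* Quiesce and reset the PMU falcon: mask its interrupts, wait for it
 * to go idle, apply the chip-specific reset, then wait for the
 * post-reset IMEM/DMEM scrub to complete.
 */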
97 static int
98 nvkm_pmu_reset(struct nvkm_pmu *pmu)
99 {
100 	struct nvkm_device *device = pmu->subdev.device;
101 
102 	if (!pmu->func->enabled(pmu))
103 		return 0;
104 
105 	/* Inhibit interrupts, and wait for idle. */
106 	nvkm_wr32(device, 0x10a014, 0x0000ffff);
107 	nvkm_msec(device, 2000,
108 		if (!nvkm_rd32(device, 0x10a04c))
109 			break;
110 	);
111 
112 	/* Reset. */
113 	if (pmu->func->reset)
114 		pmu->func->reset(pmu);
115 
116 	/* Wait for IMEM/DMEM scrubbing to be complete. */
117 	nvkm_msec(device, 2000,
118 		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
119 			break;
120 	);
121 
122 	return 0;
123 }
124 
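/* Preinit: get the PMU into a known (reset) state as early as
 * possible, before anything else touches it.
 */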
125 static int
126 nvkm_pmu_preinit(struct nvkm_subdev *subdev)
127 {
128 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
129 	return nvkm_pmu_reset(pmu);
130 }
131 
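/* Init: reset the PMU, then run the chip-/firmware-specific setup. */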
132 static int
133 nvkm_pmu_init(struct nvkm_subdev *subdev)
134 {
135 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
136 	int ret = nvkm_pmu_reset(pmu);
137 	if (ret == 0 && pmu->func->init)
138 		ret = pmu->func->init(pmu);
139 	return ret;
140 }
141 
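/* Destructor: tear down the message/command queues, the queue manager
 * and the falcon itself, then hand the object back for freeing.
 */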
142 static void *
143 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
144 {
145 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
146 	nvkm_falcon_msgq_del(&pmu->msgq);
147 	nvkm_falcon_cmdq_del(&pmu->lpq);
148 	nvkm_falcon_cmdq_del(&pmu->hpq);
149 	nvkm_falcon_qmgr_del(&pmu->qmgr);
150 	nvkm_falcon_dtor(&pmu->falcon);
151 	return nvkm_pmu(subdev);
152 }
153 
154 static const struct nvkm_subdev_func
155 nvkm_pmu = {
156 	.dtor = nvkm_pmu_dtor,
157 	.preinit = nvkm_pmu_preinit,
158 	.init = nvkm_pmu_init,
159 	.fini = nvkm_pmu_fini,
160 	.intr = nvkm_pmu_intr,
161 };
162 
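/* Common constructor.  Picks a firmware variant from the fwif list,
 * constructs the PMU falcon at base address 0x10a000, and creates the
 * queue manager along with the high-/low-priority command queues and
 * the message queue used to talk to the firmware.
 *
 * wpr_ready is assumed to be the completion signalled once the
 * firmware reports its write-protected region (WPR) is set up.
 */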
163 int
164 nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
165 	      int index, struct nvkm_pmu *pmu)
166 {
167 	int ret;
168 
169 	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
170 
171 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
172 	init_waitqueue_head(&pmu->recv.wait);
173 
174 	fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
175 	if (IS_ERR(fwif))
176 		return PTR_ERR(fwif);
177 
178 	pmu->func = fwif->func;
179 
180 	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
181 			       nvkm_subdev_name[pmu->subdev.index], 0x10a000,
182 			       &pmu->falcon);
183 	if (ret)
184 		return ret;
185 
186 	if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
187 	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
188 	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
189 	    (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
190 		return ret;
191 
192 	init_completion(&pmu->wpr_ready);
193 	return 0;
194 }
195 
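/* Allocate and construct a PMU object, returning it via *ppmu. */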
196 int
197 nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
198 	      int index, struct nvkm_pmu **ppmu)
199 {
200 	struct nvkm_pmu *pmu;
201 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
202 		return -ENOMEM;
203 	return nvkm_pmu_ctor(fwif, device, index, *ppmu);
204 }
205