xref: /openbmc/linux/drivers/char/ipmi/kcs_bmc.c (revision ba61bb17)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */

#define pr_fmt(fmt) "kcs-bmc: " fmt

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ipmi_bmc.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "kcs_bmc.h"

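/*
 * Size of the kernel-side message buffers. KCS moves one byte per
 * handshake, so this only bounds how large a single IPMI request or
 * response may be. KCS_ZERO_DATA is the dummy byte written to the ODR
 * when a handshake must be completed but there is no payload to send.
 */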
#define KCS_MSG_BUFSIZ    1000

#define KCS_ZERO_DATA     0

/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
#define KCS_STATUS_STATE(state) ((state) << 6)
#define KCS_STATUS_STATE_MASK   GENMASK(7, 6)
#define KCS_STATUS_CMD_DAT      BIT(3)
#define KCS_STATUS_SMS_ATN      BIT(2)
#define KCS_STATUS_IBF          BIT(1)
#define KCS_STATUS_OBF          BIT(0)

/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
enum kcs_states {
	IDLE_STATE  = 0,
	READ_STATE  = 1,
	WRITE_STATE = 2,
	ERROR_STATE = 3,
};

/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
#define KCS_CMD_GET_STATUS_ABORT  0x60
#define KCS_CMD_WRITE_START       0x61
#define KCS_CMD_WRITE_END         0x62
#define KCS_CMD_READ_BYTE         0x68

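/*
 * Thin accessors for the IDR/ODR/STR registers. All hardware access
 * goes through the io_inputb()/io_outputb() callbacks that the
 * device-specific driver installs before registering the device.
 */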
static inline u8 read_data(struct kcs_bmc *kcs_bmc)
{
	return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
}

static inline void write_data(struct kcs_bmc *kcs_bmc, u8 data)
{
	kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
}

static inline u8 read_status(struct kcs_bmc *kcs_bmc)
{
	return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
}

static inline void write_status(struct kcs_bmc *kcs_bmc, u8 data)
{
	kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
}

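/* Read-modify-write of the status register; callers hold kcs_bmc->lock. */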
static void update_status_bits(struct kcs_bmc *kcs_bmc, u8 mask, u8 val)
{
	u8 tmp = read_status(kcs_bmc);

	tmp &= ~mask;
	tmp |= val & mask;

	write_status(kcs_bmc, tmp);
}

static inline void set_state(struct kcs_bmc *kcs_bmc, u8 state)
{
	update_status_bits(kcs_bmc, KCS_STATUS_STATE_MASK,
					KCS_STATUS_STATE(state));
}

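/*
 * Abort the transaction in progress: enter ERROR_STATE, flush the
 * input register, complete the handshake with a dummy byte, and drop
 * any partially received request. The host is expected to recover via
 * the GET_STATUS/ABORT control code.
 */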
static void kcs_force_abort(struct kcs_bmc *kcs_bmc)
{
	set_state(kcs_bmc, ERROR_STATE);
	read_data(kcs_bmc);
	write_data(kcs_bmc, KCS_ZERO_DATA);

	kcs_bmc->phase = KCS_PHASE_ERROR;
	kcs_bmc->data_in_avail = false;
	kcs_bmc->data_in_idx = 0;
}

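/*
 * Handle a host write to the data register. The meaning of the byte
 * depends on the current transfer phase:
 *  - WRITE_START/WRITE_DATA: accumulate request bytes into data_in[]
 *  - WRITE_END_CMD: store the final byte and wake up userspace readers
 *  - READ: each byte must be KCS_CMD_READ_BYTE; it acknowledges the
 *    previous response byte and clocks out the next one
 *  - ABORT_ERROR1/ABORT_ERROR2: step through the abort handshake,
 *    returning the error code to the host
 */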
static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc)
{
	u8 data;

	switch (kcs_bmc->phase) {
	case KCS_PHASE_WRITE_START:
		kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
		/* fall through */

	case KCS_PHASE_WRITE_DATA:
		if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
			set_state(kcs_bmc, WRITE_STATE);
			write_data(kcs_bmc, KCS_ZERO_DATA);
			kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
						read_data(kcs_bmc);
		} else {
			kcs_force_abort(kcs_bmc);
			kcs_bmc->error = KCS_LENGTH_ERROR;
		}
		break;

	case KCS_PHASE_WRITE_END_CMD:
		if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
			set_state(kcs_bmc, READ_STATE);
			kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
						read_data(kcs_bmc);
			kcs_bmc->phase = KCS_PHASE_WRITE_DONE;
			kcs_bmc->data_in_avail = true;
			wake_up_interruptible(&kcs_bmc->queue);
		} else {
			kcs_force_abort(kcs_bmc);
			kcs_bmc->error = KCS_LENGTH_ERROR;
		}
		break;

	case KCS_PHASE_READ:
		if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len)
			set_state(kcs_bmc, IDLE_STATE);

		data = read_data(kcs_bmc);
		if (data != KCS_CMD_READ_BYTE) {
			set_state(kcs_bmc, ERROR_STATE);
			write_data(kcs_bmc, KCS_ZERO_DATA);
			break;
		}

		if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len) {
			write_data(kcs_bmc, KCS_ZERO_DATA);
			kcs_bmc->phase = KCS_PHASE_IDLE;
			break;
		}

		write_data(kcs_bmc, kcs_bmc->data_out[kcs_bmc->data_out_idx++]);
		break;

	case KCS_PHASE_ABORT_ERROR1:
		set_state(kcs_bmc, READ_STATE);
		read_data(kcs_bmc);
		write_data(kcs_bmc, kcs_bmc->error);
		kcs_bmc->phase = KCS_PHASE_ABORT_ERROR2;
		break;

	case KCS_PHASE_ABORT_ERROR2:
		set_state(kcs_bmc, IDLE_STATE);
		read_data(kcs_bmc);
		write_data(kcs_bmc, KCS_ZERO_DATA);
		kcs_bmc->phase = KCS_PHASE_IDLE;
		break;

	default:
		kcs_force_abort(kcs_bmc);
		break;
	}
}

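/*
 * Handle a host write to the command register (IBF with CMD_DAT set).
 * Only the control codes from IPMI 2.0 Table 9-3 are meaningful here;
 * anything else aborts the transaction with KCS_ILLEGAL_CONTROL_CODE.
 */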
static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
{
	u8 cmd;

	set_state(kcs_bmc, WRITE_STATE);
	write_data(kcs_bmc, KCS_ZERO_DATA);

	cmd = read_data(kcs_bmc);
	switch (cmd) {
	case KCS_CMD_WRITE_START:
		kcs_bmc->phase = KCS_PHASE_WRITE_START;
		kcs_bmc->error = KCS_NO_ERROR;
		kcs_bmc->data_in_avail = false;
		kcs_bmc->data_in_idx = 0;
		break;

	case KCS_CMD_WRITE_END:
		if (kcs_bmc->phase != KCS_PHASE_WRITE_DATA) {
			kcs_force_abort(kcs_bmc);
			break;
		}

		kcs_bmc->phase = KCS_PHASE_WRITE_END_CMD;
		break;

	case KCS_CMD_GET_STATUS_ABORT:
		if (kcs_bmc->error == KCS_NO_ERROR)
			kcs_bmc->error = KCS_ABORTED_BY_COMMAND;

		kcs_bmc->phase = KCS_PHASE_ABORT_ERROR1;
		kcs_bmc->data_in_avail = false;
		kcs_bmc->data_in_idx = 0;
		break;

	default:
		kcs_force_abort(kcs_bmc);
		kcs_bmc->error = KCS_ILLEGAL_CONTROL_CODE;
		break;
	}
}

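/**
 * kcs_bmc_handle_event() - service a host-side KCS event
 * @kcs_bmc: the interface on which the event occurred
 *
 * Called by the device-specific driver, typically from its IRQ
 * handler, when the host has written to the interface. Dispatches on
 * the CMD_DAT status bit to the command or data handler.
 *
 * Return: 0 on success, -ENODEV if no userspace client has the device
 * open, -ENODATA if there was nothing pending to handle.
 */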
int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
{
	unsigned long flags;
	int ret = 0;
	u8 status;

	spin_lock_irqsave(&kcs_bmc->lock, flags);

	if (!kcs_bmc->running) {
		kcs_force_abort(kcs_bmc);
		ret = -ENODEV;
		goto out_unlock;
	}

	status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);

	switch (status) {
	case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
		kcs_bmc_handle_cmd(kcs_bmc);
		break;

	case KCS_STATUS_IBF:
		kcs_bmc_handle_data(kcs_bmc);
		break;

	default:
		ret = -ENODATA;
		break;
	}

out_unlock:
	spin_unlock_irqrestore(&kcs_bmc->lock, flags);

	return ret;
}
EXPORT_SYMBOL(kcs_bmc_handle_event);

static inline struct kcs_bmc *to_kcs_bmc(struct file *filp)
{
	return container_of(filp->private_data, struct kcs_bmc, miscdev);
}

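/* Only a single userspace client may have the device open at a time. */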
static int kcs_bmc_open(struct inode *inode, struct file *filp)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	int ret = 0;

	spin_lock_irq(&kcs_bmc->lock);
	if (!kcs_bmc->running)
		kcs_bmc->running = 1;
	else
		ret = -EBUSY;
	spin_unlock_irq(&kcs_bmc->lock);

	return ret;
}

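/* Report the device readable once a complete request has been buffered. */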
static __poll_t kcs_bmc_poll(struct file *filp, poll_table *wait)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	__poll_t mask = 0;

	poll_wait(filp, &kcs_bmc->queue, wait);

	spin_lock_irq(&kcs_bmc->lock);
	if (kcs_bmc->data_in_avail)
		mask |= EPOLLIN;
	spin_unlock_irq(&kcs_bmc->lock);

	return mask;
}

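/*
 * Pass a received request up to userspace. Unless O_NONBLOCK is set,
 * block until a complete write transfer has arrived. The message must
 * be consumed in a single read(): a buffer smaller than the pending
 * request aborts the transaction and fails with -EOVERFLOW.
 */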
static ssize_t kcs_bmc_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	bool data_avail;
	size_t data_len;
	ssize_t ret;

	if (!(filp->f_flags & O_NONBLOCK))
		wait_event_interruptible(kcs_bmc->queue,
					 kcs_bmc->data_in_avail);

	mutex_lock(&kcs_bmc->mutex);

	spin_lock_irq(&kcs_bmc->lock);
	data_avail = kcs_bmc->data_in_avail;
	if (data_avail) {
		data_len = kcs_bmc->data_in_idx;
		memcpy(kcs_bmc->kbuffer, kcs_bmc->data_in, data_len);
	}
	spin_unlock_irq(&kcs_bmc->lock);

	if (!data_avail) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (count < data_len) {
		pr_err("channel=%u: data (%zu bytes) too large for userspace buffer\n",
		       kcs_bmc->channel, data_len);

		spin_lock_irq(&kcs_bmc->lock);
		kcs_force_abort(kcs_bmc);
		spin_unlock_irq(&kcs_bmc->lock);

		ret = -EOVERFLOW;
		goto out_unlock;
	}

	if (copy_to_user(buf, kcs_bmc->kbuffer, data_len)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = data_len;

	spin_lock_irq(&kcs_bmc->lock);
	if (kcs_bmc->phase == KCS_PHASE_WRITE_DONE) {
		kcs_bmc->phase = KCS_PHASE_WAIT_READ;
		kcs_bmc->data_in_avail = false;
		kcs_bmc->data_in_idx = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irq(&kcs_bmc->lock);

out_unlock:
	mutex_unlock(&kcs_bmc->mutex);

	return ret;
}

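/*
 * Accept the response from userspace. Valid only while the interface
 * is in KCS_PHASE_WAIT_READ, i.e. after the request has been read.
 * The first byte is placed in the ODR immediately; the remaining bytes
 * are clocked out by KCS_CMD_READ_BYTE handshakes in
 * kcs_bmc_handle_data().
 */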
static ssize_t kcs_bmc_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	ssize_t ret;

	/* The minimum response size is 3 bytes: netfn + cmd + completion code. */
	if (count < 3 || count > KCS_MSG_BUFSIZ)
		return -EINVAL;

	mutex_lock(&kcs_bmc->mutex);

	if (copy_from_user(kcs_bmc->kbuffer, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	spin_lock_irq(&kcs_bmc->lock);
	if (kcs_bmc->phase == KCS_PHASE_WAIT_READ) {
		kcs_bmc->phase = KCS_PHASE_READ;
		kcs_bmc->data_out_idx = 1;
		kcs_bmc->data_out_len = count;
		memcpy(kcs_bmc->data_out, kcs_bmc->kbuffer, count);
		write_data(kcs_bmc, kcs_bmc->data_out[0]);
		ret = count;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irq(&kcs_bmc->lock);

out_unlock:
	mutex_unlock(&kcs_bmc->mutex);

	return ret;
}

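/*
 * SET/CLEAR_SMS_ATN toggle the SMS_ATN status bit, which tells the
 * host that the BMC has a message pending for it; FORCE_ABORT tears
 * down whatever transaction is currently in flight.
 */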
static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	long ret = 0;

	spin_lock_irq(&kcs_bmc->lock);

	switch (cmd) {
	case IPMI_BMC_IOCTL_SET_SMS_ATN:
		update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
				   KCS_STATUS_SMS_ATN);
		break;

	case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
		update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN, 0);
		break;

	case IPMI_BMC_IOCTL_FORCE_ABORT:
		kcs_force_abort(kcs_bmc);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock_irq(&kcs_bmc->lock);

	return ret;
}

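/*
 * Stop servicing the interface and abort the transaction in progress
 * so the host is not left waiting on a reader that has gone away.
 */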
static int kcs_bmc_release(struct inode *inode, struct file *filp)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);

	spin_lock_irq(&kcs_bmc->lock);
	kcs_bmc->running = 0;
	kcs_force_abort(kcs_bmc);
	spin_unlock_irq(&kcs_bmc->lock);

	return 0;
}

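/*
 * Userspace flow (sketch): open the ipmi-kcs<channel> miscdevice,
 * poll() for EPOLLIN, read() the request, then write() the response.
 * A single request/response transaction is in flight at a time.
 */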
static const struct file_operations kcs_bmc_fops = {
	.owner          = THIS_MODULE,
	.open           = kcs_bmc_open,
	.read           = kcs_bmc_read,
	.write          = kcs_bmc_write,
	.release        = kcs_bmc_release,
	.poll           = kcs_bmc_poll,
	.unlocked_ioctl = kcs_bmc_ioctl,
};

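/**
 * kcs_bmc_alloc() - allocate and initialise a KCS BMC interface
 * @dev: parent device, used for devm allocations and device naming
 * @sizeof_priv: size of the driver-private area appended to the struct
 * @channel: IPMI channel number; the device is named ipmi-kcs<channel>
 *
 * Return: the new interface, or NULL on allocation failure. The caller
 * must still install the io accessors and register the miscdevice.
 */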
struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
{
	struct kcs_bmc *kcs_bmc;

	kcs_bmc = devm_kzalloc(dev, sizeof(*kcs_bmc) + sizeof_priv, GFP_KERNEL);
	if (!kcs_bmc)
		return NULL;

	dev_set_name(dev, "ipmi-kcs%u", channel);

	spin_lock_init(&kcs_bmc->lock);
	kcs_bmc->channel = channel;

	mutex_init(&kcs_bmc->mutex);
	init_waitqueue_head(&kcs_bmc->queue);

	kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer)
		return NULL;

	kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
	kcs_bmc->miscdev.name = dev_name(dev);
	kcs_bmc->miscdev.fops = &kcs_bmc_fops;

	return kcs_bmc;
}
EXPORT_SYMBOL(kcs_bmc_alloc);

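/*
 * A device-specific driver wires this up roughly as follows (a sketch
 * only; register setup and error handling are omitted, and my_priv,
 * my_inb and my_outb are hypothetical names):
 *
 *	kcs_bmc = kcs_bmc_alloc(&pdev->dev, sizeof(struct my_priv), chan);
 *	kcs_bmc->ioreg = ...;			// IDR/ODR/STR addresses
 *	kcs_bmc->io_inputb = my_inb;		// register accessors
 *	kcs_bmc->io_outputb = my_outb;
 *	rc = misc_register(&kcs_bmc->miscdev);
 *	// then call kcs_bmc_handle_event() from the IRQ handler
 */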
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_DESCRIPTION("KCS BMC driver for handling IPMI requests from system software");