// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */

#define pr_fmt(fmt) "kcs-bmc: " fmt

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ipmi_bmc.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "kcs_bmc.h"

#define DEVICE_NAME	"ipmi-kcs"

#define KCS_MSG_BUFSIZ	1000

#define KCS_ZERO_DATA	0

/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
#define KCS_STATUS_STATE(state)	((state) << 6)
#define KCS_STATUS_STATE_MASK	GENMASK(7, 6)
#define KCS_STATUS_CMD_DAT	BIT(3)
#define KCS_STATUS_SMS_ATN	BIT(2)
#define KCS_STATUS_IBF		BIT(1)
#define KCS_STATUS_OBF		BIT(0)

/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
enum kcs_states {
	IDLE_STATE  = 0,
	READ_STATE  = 1,
	WRITE_STATE = 2,
	ERROR_STATE = 3,
};

/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
#define KCS_CMD_GET_STATUS_ABORT	0x60
#define KCS_CMD_WRITE_START		0x61
#define KCS_CMD_WRITE_END		0x62
#define KCS_CMD_READ_BYTE		0x68

static inline u8 read_data(struct kcs_bmc *kcs_bmc)
{
	return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
}

static inline void write_data(struct kcs_bmc *kcs_bmc, u8 data)
{
	kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
}

static inline u8 read_status(struct kcs_bmc *kcs_bmc)
{
	return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
}

static inline void write_status(struct kcs_bmc *kcs_bmc, u8 data)
{
	kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
}

static void update_status_bits(struct kcs_bmc *kcs_bmc, u8 mask, u8 val)
{
	u8 tmp = read_status(kcs_bmc);

	tmp &= ~mask;
	tmp |= val & mask;

	write_status(kcs_bmc, tmp);
}

static inline void set_state(struct kcs_bmc *kcs_bmc, u8 state)
{
	update_status_bits(kcs_bmc, KCS_STATUS_STATE_MASK,
			   KCS_STATUS_STATE(state));
}

static void kcs_force_abort(struct kcs_bmc *kcs_bmc)
{
	set_state(kcs_bmc, ERROR_STATE);
	read_data(kcs_bmc);
	write_data(kcs_bmc, KCS_ZERO_DATA);

	kcs_bmc->phase = KCS_PHASE_ERROR;
	kcs_bmc->data_in_avail = false;
	kcs_bmc->data_in_idx = 0;
}
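/*
 * Handle a data byte written by the host. What the byte means depends on
 * the current transfer phase (see the KCS flow charts in IPMI 2.0,
 * chapter 9):
 *
 *  - WRITE: each host byte is latched into data_in until a WRITE_END
 *    control code announces the final byte; the completed request is then
 *    handed to userspace by waking any reader blocked in kcs_bmc_read().
 *  - READ: the host acknowledges every response byte by writing
 *    KCS_CMD_READ_BYTE, and the BMC answers with the next byte of
 *    data_out until the response is exhausted.
 *  - ABORT: the BMC first reports its latest error code, then returns to
 *    the idle state.
 *
 * Anything else aborts the transaction and forces the error state.
 */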
static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc)
{
	u8 data;

	switch (kcs_bmc->phase) {
	case KCS_PHASE_WRITE_START:
		kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
		/* fall through */

	case KCS_PHASE_WRITE_DATA:
		if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
			set_state(kcs_bmc, WRITE_STATE);
			write_data(kcs_bmc, KCS_ZERO_DATA);
			kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
						read_data(kcs_bmc);
		} else {
			kcs_force_abort(kcs_bmc);
			kcs_bmc->error = KCS_LENGTH_ERROR;
		}
		break;

	case KCS_PHASE_WRITE_END_CMD:
		if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
			set_state(kcs_bmc, READ_STATE);
			kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
						read_data(kcs_bmc);
			kcs_bmc->phase = KCS_PHASE_WRITE_DONE;
			kcs_bmc->data_in_avail = true;
			wake_up_interruptible(&kcs_bmc->queue);
		} else {
			kcs_force_abort(kcs_bmc);
			kcs_bmc->error = KCS_LENGTH_ERROR;
		}
		break;

	case KCS_PHASE_READ:
		if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len)
			set_state(kcs_bmc, IDLE_STATE);

		data = read_data(kcs_bmc);
		if (data != KCS_CMD_READ_BYTE) {
			set_state(kcs_bmc, ERROR_STATE);
			write_data(kcs_bmc, KCS_ZERO_DATA);
			break;
		}

		if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len) {
			write_data(kcs_bmc, KCS_ZERO_DATA);
			kcs_bmc->phase = KCS_PHASE_IDLE;
			break;
		}

		write_data(kcs_bmc,
			   kcs_bmc->data_out[kcs_bmc->data_out_idx++]);
		break;

	case KCS_PHASE_ABORT_ERROR1:
		set_state(kcs_bmc, READ_STATE);
		read_data(kcs_bmc);
		write_data(kcs_bmc, kcs_bmc->error);
		kcs_bmc->phase = KCS_PHASE_ABORT_ERROR2;
		break;

	case KCS_PHASE_ABORT_ERROR2:
		set_state(kcs_bmc, IDLE_STATE);
		read_data(kcs_bmc);
		write_data(kcs_bmc, KCS_ZERO_DATA);
		kcs_bmc->phase = KCS_PHASE_IDLE;
		break;

	default:
		kcs_force_abort(kcs_bmc);
		break;
	}
}

static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
{
	u8 cmd;

	set_state(kcs_bmc, WRITE_STATE);
	write_data(kcs_bmc, KCS_ZERO_DATA);

	cmd = read_data(kcs_bmc);
	switch (cmd) {
	case KCS_CMD_WRITE_START:
		kcs_bmc->phase = KCS_PHASE_WRITE_START;
		kcs_bmc->error = KCS_NO_ERROR;
		kcs_bmc->data_in_avail = false;
		kcs_bmc->data_in_idx = 0;
		break;

	case KCS_CMD_WRITE_END:
		if (kcs_bmc->phase != KCS_PHASE_WRITE_DATA) {
			kcs_force_abort(kcs_bmc);
			break;
		}

		kcs_bmc->phase = KCS_PHASE_WRITE_END_CMD;
		break;

	case KCS_CMD_GET_STATUS_ABORT:
		if (kcs_bmc->error == KCS_NO_ERROR)
			kcs_bmc->error = KCS_ABORTED_BY_COMMAND;

		kcs_bmc->phase = KCS_PHASE_ABORT_ERROR1;
		kcs_bmc->data_in_avail = false;
		kcs_bmc->data_in_idx = 0;
		break;

	default:
		kcs_force_abort(kcs_bmc);
		kcs_bmc->error = KCS_ILLEGAL_CONTROL_CODE;
		break;
	}
}
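/**
 * kcs_bmc_handle_event() - service a pending event on a KCS channel
 * @kcs_bmc: the KCS channel to service
 *
 * Reads the status register and, when the host has written a byte (IBF is
 * set), consumes it as either a control code or a data byte. The
 * hardware-specific driver calls this, typically from its interrupt
 * handler.
 *
 * Return: 0 if an event was handled, -ENODATA if no input was pending.
 */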
int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
{
	unsigned long flags;
	int ret = -ENODATA;
	u8 status;

	spin_lock_irqsave(&kcs_bmc->lock, flags);

	status = read_status(kcs_bmc);
	if (status & KCS_STATUS_IBF) {
		if (!kcs_bmc->running)
			kcs_force_abort(kcs_bmc);
		else if (status & KCS_STATUS_CMD_DAT)
			kcs_bmc_handle_cmd(kcs_bmc);
		else
			kcs_bmc_handle_data(kcs_bmc);

		ret = 0;
	}

	spin_unlock_irqrestore(&kcs_bmc->lock, flags);

	return ret;
}
EXPORT_SYMBOL(kcs_bmc_handle_event);

static inline struct kcs_bmc *to_kcs_bmc(struct file *filp)
{
	return container_of(filp->private_data, struct kcs_bmc, miscdev);
}

static int kcs_bmc_open(struct inode *inode, struct file *filp)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	int ret = 0;

	spin_lock_irq(&kcs_bmc->lock);
	if (!kcs_bmc->running)
		kcs_bmc->running = 1;
	else
		ret = -EBUSY;
	spin_unlock_irq(&kcs_bmc->lock);

	return ret;
}

static __poll_t kcs_bmc_poll(struct file *filp, poll_table *wait)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	__poll_t mask = 0;

	poll_wait(filp, &kcs_bmc->queue, wait);

	spin_lock_irq(&kcs_bmc->lock);
	if (kcs_bmc->data_in_avail)
		mask |= EPOLLIN;
	spin_unlock_irq(&kcs_bmc->lock);

	return mask;
}

static ssize_t kcs_bmc_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	bool data_avail;
	size_t data_len;
	ssize_t ret;

	if (!(filp->f_flags & O_NONBLOCK))
		wait_event_interruptible(kcs_bmc->queue,
					 kcs_bmc->data_in_avail);

	mutex_lock(&kcs_bmc->mutex);

	spin_lock_irq(&kcs_bmc->lock);
	data_avail = kcs_bmc->data_in_avail;
	if (data_avail) {
		data_len = kcs_bmc->data_in_idx;
		memcpy(kcs_bmc->kbuffer, kcs_bmc->data_in, data_len);
	}
	spin_unlock_irq(&kcs_bmc->lock);

	if (!data_avail) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (count < data_len) {
		pr_err("channel=%u: request of %zu bytes exceeds read buffer\n",
			kcs_bmc->channel, data_len);

		spin_lock_irq(&kcs_bmc->lock);
		kcs_force_abort(kcs_bmc);
		spin_unlock_irq(&kcs_bmc->lock);

		ret = -EOVERFLOW;
		goto out_unlock;
	}

	if (copy_to_user(buf, kcs_bmc->kbuffer, data_len)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = data_len;

	spin_lock_irq(&kcs_bmc->lock);
	if (kcs_bmc->phase == KCS_PHASE_WRITE_DONE) {
		kcs_bmc->phase = KCS_PHASE_WAIT_READ;
		kcs_bmc->data_in_avail = false;
		kcs_bmc->data_in_idx = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irq(&kcs_bmc->lock);

out_unlock:
	mutex_unlock(&kcs_bmc->mutex);

	return ret;
}

static ssize_t kcs_bmc_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	ssize_t ret;

	/* The minimum response size is 3: netfn + cmd + completion code */
	if (count < 3 || count > KCS_MSG_BUFSIZ)
		return -EINVAL;

	mutex_lock(&kcs_bmc->mutex);

	if (copy_from_user(kcs_bmc->kbuffer, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	spin_lock_irq(&kcs_bmc->lock);
	if (kcs_bmc->phase == KCS_PHASE_WAIT_READ) {
		kcs_bmc->phase = KCS_PHASE_READ;
		kcs_bmc->data_out_idx = 1;
		kcs_bmc->data_out_len = count;
		memcpy(kcs_bmc->data_out, kcs_bmc->kbuffer, count);
		write_data(kcs_bmc, kcs_bmc->data_out[0]);
		ret = count;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irq(&kcs_bmc->lock);

out_unlock:
	mutex_unlock(&kcs_bmc->mutex);

	return ret;
}

static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
	long ret = 0;

	spin_lock_irq(&kcs_bmc->lock);

	switch (cmd) {
	case IPMI_BMC_IOCTL_SET_SMS_ATN:
		update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
				   KCS_STATUS_SMS_ATN);
		break;

	case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
		update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN, 0);
		break;

	case IPMI_BMC_IOCTL_FORCE_ABORT:
		kcs_force_abort(kcs_bmc);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock_irq(&kcs_bmc->lock);

	return ret;
}

static int kcs_bmc_release(struct inode *inode, struct file *filp)
{
	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);

	spin_lock_irq(&kcs_bmc->lock);
	kcs_bmc->running = 0;
	kcs_force_abort(kcs_bmc);
	spin_unlock_irq(&kcs_bmc->lock);

	return 0;
}

static const struct file_operations kcs_bmc_fops = {
	.owner          = THIS_MODULE,
	.open           = kcs_bmc_open,
	.read           = kcs_bmc_read,
	.write          = kcs_bmc_write,
	.release        = kcs_bmc_release,
	.poll           = kcs_bmc_poll,
	.unlocked_ioctl = kcs_bmc_ioctl,
};
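/**
 * kcs_bmc_alloc() - allocate and initialize a KCS channel
 * @dev: parent device; all allocations made here are device-managed
 * @sizeof_priv: bytes of driver-private data to reserve behind the
 *               structure, retrievable with kcs_bmc_priv()
 * @channel: channel number, used to name the "ipmi-kcs<channel>" miscdev
 *
 * The hardware-specific driver must still fill in @ioreg, @io_inputb and
 * @io_outputb, and register @miscdev itself.
 *
 * Return: the new channel, or NULL on allocation failure.
 */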
struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
{
	struct kcs_bmc *kcs_bmc;

	kcs_bmc = devm_kzalloc(dev, sizeof(*kcs_bmc) + sizeof_priv, GFP_KERNEL);
	if (!kcs_bmc)
		return NULL;

	spin_lock_init(&kcs_bmc->lock);
	kcs_bmc->channel = channel;

	mutex_init(&kcs_bmc->mutex);
	init_waitqueue_head(&kcs_bmc->queue);

	kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer)
		return NULL;

	kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
	kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
					       DEVICE_NAME, channel);
	kcs_bmc->miscdev.fops = &kcs_bmc_fops;

	return kcs_bmc;
}
EXPORT_SYMBOL(kcs_bmc_alloc);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_DESCRIPTION("KCS BMC driver that handles IPMI requests from system software");
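
/*
 * Usage sketch: a hardware-specific driver is expected to allocate the
 * channel, wire up its register accessors, forward interrupts to
 * kcs_bmc_handle_event(), and register the misc device. The names below
 * (foo_kcs, foo_kcs_inb, foo_kcs_outb, foo_kcs_irq, foo_kcs_probe and the
 * FOO_* register offsets) are hypothetical; see an in-tree user such as
 * drivers/char/ipmi/kcs_bmc_aspeed.c for a real implementation.
 *
 *	static u8 foo_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
 *	{
 *		struct foo_kcs *priv = kcs_bmc_priv(kcs_bmc);
 *
 *		return readb(priv->base + reg);
 *	}
 *
 *	static void foo_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
 *	{
 *		struct foo_kcs *priv = kcs_bmc_priv(kcs_bmc);
 *
 *		writeb(data, priv->base + reg);
 *	}
 *
 *	static irqreturn_t foo_kcs_irq(int irq, void *arg)
 *	{
 *		struct kcs_bmc *kcs_bmc = arg;
 *
 *		if (!kcs_bmc_handle_event(kcs_bmc))
 *			return IRQ_HANDLED;
 *
 *		return IRQ_NONE;
 *	}
 *
 *	static int foo_kcs_probe(struct platform_device *pdev)
 *	{
 *		struct kcs_bmc *kcs_bmc;
 *
 *		kcs_bmc = kcs_bmc_alloc(&pdev->dev,
 *					sizeof(struct foo_kcs), 1);
 *		if (!kcs_bmc)
 *			return -ENOMEM;
 *
 *		kcs_bmc->ioreg.idr = FOO_IDR;
 *		kcs_bmc->ioreg.odr = FOO_ODR;
 *		kcs_bmc->ioreg.str = FOO_STR;
 *		kcs_bmc->io_inputb = foo_kcs_inb;
 *		kcs_bmc->io_outputb = foo_kcs_outb;
 *
 *		... map priv->base, request the IRQ with foo_kcs_irq ...
 *
 *		return misc_register(&kcs_bmc->miscdev);
 *	}
 */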