// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2021 IBM Corp. */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include "kcs_bmc_client.h"

#define DEVICE_NAME "raw-kcs"

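/*
 * Userspace interface, as implemented by the read()/write() handlers below:
 * a transfer covers at most two bytes, where file offset 0 maps to the data
 * register (IDR on read, ODR on write) and file offset 1 maps to the status
 * register (STR). A read of IDR blocks until the host raises IBF unless
 * O_NONBLOCK is set, and a write to ODR blocks while OBF remains set.
 *
 * Illustrative sketch only, assuming the device registered as /dev/raw-kcs0
 * and eliding error handling:
 *
 *	int fd = open("/dev/raw-kcs0", O_RDWR);
 *	uint8_t req[2], rsp = 0xaa;
 *
 *	pread(fd, req, sizeof(req), 0);		// req[0] = IDR, req[1] = STR
 *	pwrite(fd, &rsp, sizeof(rsp), 0);	// write ODR, setting OBF
 */
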
struct kcs_bmc_raw {
	struct list_head entry;

	struct kcs_bmc_client client;

	/* queue.lock protects events, writable, readable and idr */
	wait_queue_head_t queue;
	u8 events;
	bool writable;
	bool readable;
	u8 idr;

	struct miscdevice miscdev;
};

static inline struct kcs_bmc_raw *client_to_kcs_bmc_raw(struct kcs_bmc_client *client)
{
	return container_of(client, struct kcs_bmc_raw, client);
}

/* Call under priv->queue.lock */
static void kcs_bmc_raw_update_event_mask(struct kcs_bmc_raw *priv, u8 mask, u8 state)
{
	kcs_bmc_update_event_mask(priv->client.dev, mask, state);
	priv->events &= ~mask;
	priv->events |= state & mask;
}

static irqreturn_t kcs_bmc_raw_event(struct kcs_bmc_client *client)
{
	struct kcs_bmc_raw *priv;
	struct device *dev;
	u8 status, handled;

	priv = client_to_kcs_bmc_raw(client);
	dev = priv->miscdev.this_device;

	spin_lock(&priv->queue.lock);

	status = kcs_bmc_read_status(client->dev);
	handled = 0;

	if ((priv->events & KCS_BMC_EVENT_TYPE_IBF) && (status & KCS_BMC_STR_IBF)) {
		if (priv->readable)
			dev_err(dev, "Unexpected IBF IRQ, dropping data\n");

		dev_dbg(dev, "Disabling IDR events for back-pressure\n");
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_IBF, 0);
		priv->idr = kcs_bmc_read_data(client->dev);
		priv->readable = true;

		dev_dbg(dev, "IDR read, waking waiters\n");
		wake_up_locked(&priv->queue);

		handled |= KCS_BMC_EVENT_TYPE_IBF;
	}

	if ((priv->events & KCS_BMC_EVENT_TYPE_OBE) && !(status & KCS_BMC_STR_OBF)) {
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, 0);
		priv->writable = true;

		dev_dbg(dev, "ODR writable, waking waiters\n");
		wake_up_locked(&priv->queue);

		handled |= KCS_BMC_EVENT_TYPE_OBE;
	}

	spin_unlock(&priv->queue.lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static const struct kcs_bmc_client_ops kcs_bmc_raw_client_ops = {
	.event = kcs_bmc_raw_event,
};

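/*
 * The misc core sets filp->private_data to the registered struct miscdevice on
 * open(), so container_of() can recover the enclosing kcs_bmc_raw instance.
 */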
static inline struct kcs_bmc_raw *file_to_kcs_bmc_raw(struct file *filp)
{
	return container_of(filp->private_data, struct kcs_bmc_raw, miscdev);
}

static int kcs_bmc_raw_open(struct inode *inode, struct file *filp)
{
	struct kcs_bmc_raw *priv = file_to_kcs_bmc_raw(filp);
	int rc;

	priv->events = KCS_BMC_EVENT_TYPE_IBF;
	rc = kcs_bmc_enable_device(priv->client.dev, &priv->client);
	if (rc)
		priv->events = 0;

	return rc;
}

static bool kcs_bmc_raw_prepare_obe(struct kcs_bmc_raw *priv)
{
	bool writable;

	/* Enable the OBE event so we can catch the host clearing OBF */
	kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, KCS_BMC_EVENT_TYPE_OBE);

	/* Now that we'll catch an OBE event, check if it's already occurred */
	writable = !(kcs_bmc_read_status(priv->client.dev) & KCS_BMC_STR_OBF);

	/* If OBF is clear we've missed the OBE event, so disable it */
	if (writable)
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, 0);

	return writable;
}

static __poll_t kcs_bmc_raw_poll(struct file *filp, poll_table *wait)
{
	struct kcs_bmc_raw *priv;
	__poll_t events = 0;

	priv = file_to_kcs_bmc_raw(filp);

	poll_wait(filp, &priv->queue, wait);

	spin_lock_irq(&priv->queue.lock);
	if (kcs_bmc_raw_prepare_obe(priv))
		events |= (EPOLLOUT | EPOLLWRNORM);

	if (priv->readable || (kcs_bmc_read_status(priv->client.dev) & KCS_BMC_STR_IBF))
		events |= (EPOLLIN | EPOLLRDNORM);
	spin_unlock_irq(&priv->queue.lock);

	return events;
}

static ssize_t kcs_bmc_raw_read(struct file *filp, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct kcs_bmc_device *kcs_bmc;
	struct kcs_bmc_raw *priv;
	bool read_idr, read_str;
	struct device *dev;
	u8 idr, str;
	ssize_t rc;

	priv = file_to_kcs_bmc_raw(filp);
	kcs_bmc = priv->client.dev;
	dev = priv->miscdev.this_device;

	if (!count)
		return 0;

	if (count > 2 || *ppos > 1)
		return -EINVAL;

	if (*ppos + count > 2)
		return -EINVAL;

	read_idr = (*ppos == 0);
	read_str = (*ppos == 1) || (count == 2);

	spin_lock_irq(&priv->queue.lock);
	if (read_idr) {
		dev_dbg(dev, "Waiting for IBF\n");
		str = kcs_bmc_read_status(kcs_bmc);
		if ((filp->f_flags & O_NONBLOCK) && !priv->readable &&
		    !(str & KCS_BMC_STR_IBF)) {
			rc = -EWOULDBLOCK;
			goto out;
		}

		rc = wait_event_interruptible_locked(priv->queue,
						     priv->readable || (str & KCS_BMC_STR_IBF));
		if (rc < 0)
			goto out;

		if (signal_pending(current)) {
			dev_dbg(dev, "Interrupted waiting for IBF\n");
			rc = -EINTR;
			goto out;
		}

		/*
		 * Re-enable events prior to possible read of IDR (which clears
		 * IBF) to ensure we receive interrupts for subsequent writes
		 * to IDR. Writes to IDR by the host should not occur while IBF
		 * is set.
		 */
		dev_dbg(dev, "Woken by IBF, enabling IRQ\n");
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_IBF,
					      KCS_BMC_EVENT_TYPE_IBF);

		/* Read data out of IDR into internal storage if necessary */
		if (!priv->readable) {
			WARN(!(str & KCS_BMC_STR_IBF), "Unknown reason for wakeup!");

			priv->idr = kcs_bmc_read_data(kcs_bmc);
		}

		/* Copy data from internal storage to userspace */
		idr = priv->idr;

		/* We're done consuming the internally stored value */
		priv->readable = false;
	}

	if (read_str) {
		str = kcs_bmc_read_status(kcs_bmc);
		if (*ppos == 0 || priv->readable) {
			/*
			 * If we got this far with `*ppos == 0` then we've read
			 * data out of IDR, so set IBF when reporting back to
			 * userspace so userspace knows the IDR value is valid.
			 */
			str |= KCS_BMC_STR_IBF;
		}

		dev_dbg(dev, "Read status 0x%x\n", str);
	}

	rc = count;
out:
	spin_unlock_irq(&priv->queue.lock);

	if (rc < 0)
		return rc;

	/* Now copy the data into the userspace buffer */

	if (read_idr)
		if (copy_to_user(buf++, &idr, sizeof(idr)))
			return -EFAULT;

	if (read_str)
		if (copy_to_user(buf, &str, sizeof(str)))
			return -EFAULT;

	return count;
}

static ssize_t kcs_bmc_raw_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct kcs_bmc_device *kcs_bmc;
	bool write_odr, write_str;
	struct kcs_bmc_raw *priv;
	struct device *dev;
	ssize_t result;
	u8 data[2];
	u8 str;

	priv = file_to_kcs_bmc_raw(filp);
	kcs_bmc = priv->client.dev;
	dev = priv->miscdev.this_device;

	if (!count)
		return count;

	if (count > 2)
		return -EINVAL;

	if (*ppos >= 2)
		return -EINVAL;

	if (*ppos + count > 2)
		return -EINVAL;

	if (copy_from_user(data, buf, count))
		return -EFAULT;

	write_odr = (*ppos == 0);
	write_str = (*ppos == 1) || (count == 2);

	spin_lock_irq(&priv->queue.lock);

	/* Always write status before data, we generate the SerIRQ by writing ODR */
	if (write_str) {
		/* The index of STR in the userspace buffer depends on whether ODR is written */
		str = data[*ppos == 0];
		if (!(str & KCS_BMC_STR_OBF))
			dev_warn(dev, "Clearing OBF with status write: 0x%x\n", str);
		dev_dbg(dev, "Writing status 0x%x\n", str);
		kcs_bmc_write_status(kcs_bmc, str);
	}

	if (write_odr) {
		/* If we're writing ODR it's always the first byte in the buffer */
		u8 odr = data[0];

		str = kcs_bmc_read_status(kcs_bmc);
		if (str & KCS_BMC_STR_OBF) {
			if (filp->f_flags & O_NONBLOCK) {
				result = -EWOULDBLOCK;
				goto out;
			}

			priv->writable = kcs_bmc_raw_prepare_obe(priv);

			/* Now either OBF is already clear, or we'll get an OBE event to wake us */
			dev_dbg(dev, "Waiting for OBF to clear\n");
			wait_event_interruptible_locked(priv->queue, priv->writable);

			if (signal_pending(current)) {
				kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, 0);
				result = -EINTR;
				goto out;
			}

			WARN_ON(kcs_bmc_read_status(kcs_bmc) & KCS_BMC_STR_OBF);
		}

		dev_dbg(dev, "Writing 0x%x to ODR\n", odr);
		kcs_bmc_write_data(kcs_bmc, odr);
	}

	result = count;
out:
	spin_unlock_irq(&priv->queue.lock);

	return result;
}

static int kcs_bmc_raw_release(struct inode *inode, struct file *filp)
{
	struct kcs_bmc_raw *priv = file_to_kcs_bmc_raw(filp);

	kcs_bmc_disable_device(priv->client.dev, &priv->client);
	priv->events = 0;

	return 0;
}

static const struct file_operations kcs_bmc_raw_fops = {
	.owner = THIS_MODULE,
	.open = kcs_bmc_raw_open,
	.llseek = no_seek_end_llseek,
	.read = kcs_bmc_raw_read,
	.write = kcs_bmc_raw_write,
	.poll = kcs_bmc_raw_poll,
	.release = kcs_bmc_raw_release,
};

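/*
 * Registered instances are tracked so that kcs_bmc_raw_remove_device() can
 * find the client bound to a given struct kcs_bmc_device.
 */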
static DEFINE_SPINLOCK(kcs_bmc_raw_instances_lock);
static LIST_HEAD(kcs_bmc_raw_instances);

static int kcs_bmc_raw_add_device(struct kcs_bmc_device *kcs_bmc)
{
	struct kcs_bmc_raw *priv;
	int rc;

	priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client.dev = kcs_bmc;
	priv->client.ops = &kcs_bmc_raw_client_ops;

	init_waitqueue_head(&priv->queue);
	priv->writable = false;
	priv->readable = false;

	priv->miscdev.minor = MISC_DYNAMIC_MINOR;
	priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
					    kcs_bmc->channel);
	if (!priv->miscdev.name)
		return -ENOMEM;

	priv->miscdev.fops = &kcs_bmc_raw_fops;

	/* Disable interrupts until userspace opens the chardev */
	kcs_bmc_raw_update_event_mask(priv, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);

	rc = misc_register(&priv->miscdev);
	if (rc) {
		dev_err(kcs_bmc->dev, "Unable to register device\n");
		return rc;
	}

	spin_lock_irq(&kcs_bmc_raw_instances_lock);
	list_add(&priv->entry, &kcs_bmc_raw_instances);
	spin_unlock_irq(&kcs_bmc_raw_instances_lock);

	dev_info(kcs_bmc->dev, "Initialised raw client for channel %d\n", kcs_bmc->channel);

	return 0;
}

static int kcs_bmc_raw_remove_device(struct kcs_bmc_device *kcs_bmc)
{
	struct kcs_bmc_raw *priv = NULL, *pos;

	spin_lock_irq(&kcs_bmc_raw_instances_lock);
	list_for_each_entry(pos, &kcs_bmc_raw_instances, entry) {
		if (pos->client.dev == kcs_bmc) {
			priv = pos;
			list_del(&pos->entry);
			break;
		}
	}
	spin_unlock_irq(&kcs_bmc_raw_instances_lock);

	if (!priv)
		return -ENODEV;

	misc_deregister(&priv->miscdev);
	kcs_bmc_disable_device(kcs_bmc, &priv->client);
	devm_kfree(priv->client.dev->dev, priv);

	return 0;
}

static const struct kcs_bmc_driver_ops kcs_bmc_raw_driver_ops = {
	.add_device = kcs_bmc_raw_add_device,
	.remove_device = kcs_bmc_raw_remove_device,
};

static struct kcs_bmc_driver kcs_bmc_raw_driver = {
	.ops = &kcs_bmc_raw_driver_ops,
};

static int kcs_bmc_raw_init(void)
{
	kcs_bmc_register_driver(&kcs_bmc_raw_driver);

	return 0;
}
module_init(kcs_bmc_raw_init);

static void kcs_bmc_raw_exit(void)
{
	kcs_bmc_unregister_driver(&kcs_bmc_raw_driver);
}
module_exit(kcs_bmc_raw_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("Character device for raw access to a KCS device");