// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2021 IBM Corp. */
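
/*
 * Provide a character device per KCS channel for raw userspace access to the
 * BMC-side KCS registers. The file offset selects the register: offset 0 is
 * the data register (IDR for reads, ODR for writes) and offset 1 is the
 * status register (STR). A count of 2 at offset 0 transfers both registers in
 * one call. STR is always written before ODR, as the write to ODR is what
 * raises the SerIRQ to the host.
 *
 * A minimal, hypothetical userspace exchange (fd and the response value are
 * illustrative only, not part of the driver):
 *
 *	u8 req, rsp = 0xab;
 *
 *	pread(fd, &req, 1, 0);		// block until the host writes IDR
 *	pwrite(fd, &rsp, 1, 0);		// write ODR, raising the host SerIRQ
 */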

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>

#include "kcs_bmc_client.h"

#define DEVICE_NAME "raw-kcs"

struct kcs_bmc_raw {
	struct list_head entry;

	struct kcs_bmc_client client;

	wait_queue_head_t queue;
	u8 events;	/* Mask of currently enabled KCS_BMC_EVENT_TYPE_* events */
	bool writable;	/* OBF is clear, ODR may be written */
	bool readable;	/* A host write has been buffered in @idr */
	u8 idr;		/* Value read from IDR by the event handler */

	struct miscdevice miscdev;
};

static inline struct kcs_bmc_raw *client_to_kcs_bmc_raw(struct kcs_bmc_client *client)
{
	return container_of(client, struct kcs_bmc_raw, client);
}

/* Call under priv->queue.lock */
static void kcs_bmc_raw_update_event_mask(struct kcs_bmc_raw *priv, u8 mask, u8 state)
{
	kcs_bmc_update_event_mask(priv->client.dev, mask, state);
	priv->events &= ~mask;
	priv->events |= state & mask;
}

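/*
 * The client event callback runs from the KCS device's interrupt handler,
 * hence the plain spin_lock() and the use of wake_up_locked() while the
 * queue lock is held.
 */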
static irqreturn_t kcs_bmc_raw_event(struct kcs_bmc_client *client)
{
	struct kcs_bmc_raw *priv;
	struct device *dev;
	u8 status, handled;

	priv = client_to_kcs_bmc_raw(client);
	dev = priv->miscdev.this_device;

	spin_lock(&priv->queue.lock);

	status = kcs_bmc_read_status(client->dev);
	handled = 0;

	if ((priv->events & KCS_BMC_EVENT_TYPE_IBF) && (status & KCS_BMC_STR_IBF)) {
		if (priv->readable)
			dev_err(dev, "Unexpected IBF IRQ, dropping data\n");

		dev_dbg(dev, "Disabling IDR events for back-pressure\n");
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_IBF, 0);
		priv->idr = kcs_bmc_read_data(client->dev);
		priv->readable = true;

		dev_dbg(dev, "IDR read, waking waiters\n");
		wake_up_locked(&priv->queue);

		handled |= KCS_BMC_EVENT_TYPE_IBF;
	}

	if ((priv->events & KCS_BMC_EVENT_TYPE_OBE) && !(status & KCS_BMC_STR_OBF)) {
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, 0);
		priv->writable = true;

		dev_dbg(dev, "ODR writable, waking waiters\n");
		wake_up_locked(&priv->queue);

		handled |= KCS_BMC_EVENT_TYPE_OBE;
	}

	spin_unlock(&priv->queue.lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static const struct kcs_bmc_client_ops kcs_bmc_raw_client_ops = {
	.event = kcs_bmc_raw_event,
};

static inline struct kcs_bmc_raw *file_to_kcs_bmc_raw(struct file *filp)
{
	return container_of(filp->private_data, struct kcs_bmc_raw, miscdev);
}

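/*
 * Opening the device enables IBF events so host writes to IDR raise an
 * interrupt; the event mask is rolled back if the device cannot be enabled.
 */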
static int kcs_bmc_raw_open(struct inode *inode, struct file *filp)
{
	struct kcs_bmc_raw *priv = file_to_kcs_bmc_raw(filp);
	int rc;

	priv->events = KCS_BMC_EVENT_TYPE_IBF;
	rc = kcs_bmc_enable_device(priv->client.dev, &priv->client);
	if (rc)
		priv->events = 0;

	return rc;
}

static bool kcs_bmc_raw_prepare_obe(struct kcs_bmc_raw *priv)
{
	bool writable;

	/* Enable the OBE event so we can catch the host clearing OBF */
	kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, KCS_BMC_EVENT_TYPE_OBE);

	/* Now that we'll catch an OBE event, check if it's already occurred */
	writable = !(kcs_bmc_read_status(priv->client.dev) & KCS_BMC_STR_OBF);

	/* If OBF is clear we've missed the OBE event, so disable it */
	if (writable)
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, 0);

	return writable;
}

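/*
 * poll() reports EPOLLOUT when OBF is clear and EPOLLIN when host data is
 * pending. Probing writability also arms the OBE event, so a wake-up is
 * delivered once the host consumes ODR.
 */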
static __poll_t kcs_bmc_raw_poll(struct file *filp, poll_table *wait)
{
	struct kcs_bmc_raw *priv;
	__poll_t events = 0;

	priv = file_to_kcs_bmc_raw(filp);

	poll_wait(filp, &priv->queue, wait);

	spin_lock_irq(&priv->queue.lock);
	if (kcs_bmc_raw_prepare_obe(priv))
		events |= (EPOLLOUT | EPOLLWRNORM);

	if (priv->readable || (kcs_bmc_read_status(priv->client.dev) & KCS_BMC_STR_IBF))
		events |= (EPOLLIN | EPOLLRDNORM);
	spin_unlock_irq(&priv->queue.lock);

	return events;
}

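/*
 * Reads block until the host has written IDR, unless O_NONBLOCK is set. When
 * both registers are read in one call the reported STR has IBF forced on so
 * userspace knows the accompanying IDR value was valid at the time of read.
 */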
static ssize_t kcs_bmc_raw_read(struct file *filp, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct kcs_bmc_device *kcs_bmc;
	struct kcs_bmc_raw *priv;
	bool read_idr, read_str;
	struct device *dev;
	u8 idr, str;
	ssize_t rc;

	priv = file_to_kcs_bmc_raw(filp);
	kcs_bmc = priv->client.dev;
	dev = priv->miscdev.this_device;

	if (!count)
		return 0;

	if (count > 2 || *ppos > 1)
		return -EINVAL;

	if (*ppos + count > 2)
		return -EINVAL;

	read_idr = (*ppos == 0);
	read_str = (*ppos == 1) || (count == 2);

	spin_lock_irq(&priv->queue.lock);
	if (read_idr) {
		dev_dbg(dev, "Waiting for IBF\n");
		str = kcs_bmc_read_status(kcs_bmc);
		if ((filp->f_flags & O_NONBLOCK) &&
		    !priv->readable && !(str & KCS_BMC_STR_IBF)) {
			rc = -EWOULDBLOCK;
			goto out;
		}

		rc = wait_event_interruptible_locked_irq(priv->queue,
							 priv->readable || (str & KCS_BMC_STR_IBF));
		if (rc < 0)
			goto out;

		if (signal_pending(current)) {
			dev_dbg(dev, "Interrupted waiting for IBF\n");
			rc = -EINTR;
			goto out;
		}

		/*
		 * Re-enable events prior to possible read of IDR (which clears
		 * IBF) to ensure we receive interrupts for subsequent writes
		 * to IDR. Writes to IDR by the host should not occur while IBF
		 * is set.
		 */
		dev_dbg(dev, "Woken by IBF, enabling IRQ\n");
		kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_IBF,
					      KCS_BMC_EVENT_TYPE_IBF);

		/* Read data out of IDR into internal storage if necessary */
		if (!priv->readable) {
			WARN(!(str & KCS_BMC_STR_IBF), "Unknown reason for wakeup!");

			priv->idr = kcs_bmc_read_data(kcs_bmc);
		}

		/* Copy data from internal storage to userspace */
		idr = priv->idr;

		/* We're done consuming the internally stored value */
		priv->readable = false;
	}

	if (read_str) {
		str = kcs_bmc_read_status(kcs_bmc);
		if (*ppos == 0 || priv->readable)
			/*
			 * If we got this far with `*ppos == 0` then we've read
			 * data out of IDR, so set IBF when reporting back to
			 * userspace so userspace knows the IDR value is valid.
			 */
			str |= KCS_BMC_STR_IBF;

		dev_dbg(dev, "Read status 0x%x\n", str);
	}

	rc = count;
out:
	spin_unlock_irq(&priv->queue.lock);

	if (rc < 0)
		return rc;

	/* Now copy the data into the userspace buffer */

	if (read_idr)
		if (copy_to_user(buf++, &idr, sizeof(idr)))
			return -EFAULT;

	if (read_str)
		if (copy_to_user(buf, &str, sizeof(str)))
			return -EFAULT;

	return count;
}

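/*
 * Writes to ODR block until the host has read the previous value (OBF
 * clear), unless O_NONBLOCK is set. STR is written before ODR so the host
 * observes a consistent status when the ODR write raises the SerIRQ.
 */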
static ssize_t kcs_bmc_raw_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct kcs_bmc_device *kcs_bmc;
	bool write_odr, write_str;
	struct kcs_bmc_raw *priv;
	struct device *dev;
	ssize_t result;
	u8 data[2];
	u8 str;

	priv = file_to_kcs_bmc_raw(filp);
	kcs_bmc = priv->client.dev;
	dev = priv->miscdev.this_device;

	if (!count)
		return count;

	if (count > 2)
		return -EINVAL;

	if (*ppos >= 2)
		return -EINVAL;

	if (*ppos + count > 2)
		return -EINVAL;

	if (copy_from_user(data, buf, count))
		return -EFAULT;

	write_odr = (*ppos == 0);
	write_str = (*ppos == 1) || (count == 2);

	spin_lock_irq(&priv->queue.lock);

	/* Always write status before data, we generate the SerIRQ by writing ODR */
	if (write_str) {
		/* The index of STR in the userspace buffer depends on whether ODR is written */
		str = data[*ppos == 0];
		if (!(str & KCS_BMC_STR_OBF))
			dev_warn(dev, "Clearing OBF with status write: 0x%x\n", str);
		dev_dbg(dev, "Writing status 0x%x\n", str);
		kcs_bmc_write_status(kcs_bmc, str);
	}

	if (write_odr) {
		/* If we're writing ODR it's always the first byte in the buffer */
		u8 odr = data[0];

		str = kcs_bmc_read_status(kcs_bmc);
		if (str & KCS_BMC_STR_OBF) {
			if (filp->f_flags & O_NONBLOCK) {
				result = -EWOULDBLOCK;
				goto out;
			}

			priv->writable = kcs_bmc_raw_prepare_obe(priv);

			/* Now either OBF is already clear, or we'll get an OBE event to wake us */
			dev_dbg(dev, "Waiting for OBF to clear\n");
			wait_event_interruptible_locked_irq(priv->queue, priv->writable);

			if (signal_pending(current)) {
				kcs_bmc_raw_update_event_mask(priv, KCS_BMC_EVENT_TYPE_OBE, 0);
				result = -EINTR;
				goto out;
			}

			WARN_ON(kcs_bmc_read_status(kcs_bmc) & KCS_BMC_STR_OBF);
		}

		dev_dbg(dev, "Writing 0x%x to ODR\n", odr);
		kcs_bmc_write_data(kcs_bmc, odr);
	}

	result = count;
out:
	spin_unlock_irq(&priv->queue.lock);

	return result;
}

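/* Quiesce the interface on release: disable the device and mask all events */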
static int kcs_bmc_raw_release(struct inode *inode, struct file *filp)
{
	struct kcs_bmc_raw *priv = file_to_kcs_bmc_raw(filp);

	kcs_bmc_disable_device(priv->client.dev, &priv->client);
	priv->events = 0;

	return 0;
}

static const struct file_operations kcs_bmc_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= kcs_bmc_raw_open,
	.llseek		= no_seek_end_llseek,
	.read		= kcs_bmc_raw_read,
	.write		= kcs_bmc_raw_write,
	.poll		= kcs_bmc_raw_poll,
	.release	= kcs_bmc_raw_release,
};

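/*
 * Track live instances so remove_device() can map a struct kcs_bmc_device
 * back to the raw client that was registered against it.
 */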
static DEFINE_SPINLOCK(kcs_bmc_raw_instances_lock);
static LIST_HEAD(kcs_bmc_raw_instances);

static int kcs_bmc_raw_add_device(struct kcs_bmc_device *kcs_bmc)
{
	struct kcs_bmc_raw *priv;
	int rc;

	priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client.dev = kcs_bmc;
	priv->client.ops = &kcs_bmc_raw_client_ops;

	init_waitqueue_head(&priv->queue);
	priv->writable = false;
	priv->readable = false;

	priv->miscdev.minor = MISC_DYNAMIC_MINOR;
	priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
					    kcs_bmc->channel);
	if (!priv->miscdev.name)
		return -ENOMEM;

	priv->miscdev.fops = &kcs_bmc_raw_fops;

	/* Disable interrupts until userspace opens the chardev */
	kcs_bmc_raw_update_event_mask(priv, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);

	rc = misc_register(&priv->miscdev);
	if (rc) {
		dev_err(kcs_bmc->dev, "Unable to register device\n");
		return rc;
	}

	spin_lock_irq(&kcs_bmc_raw_instances_lock);
	list_add(&priv->entry, &kcs_bmc_raw_instances);
	spin_unlock_irq(&kcs_bmc_raw_instances_lock);

	dev_info(kcs_bmc->dev, "Initialised raw client for channel %d\n", kcs_bmc->channel);

	return 0;
}

static int kcs_bmc_raw_remove_device(struct kcs_bmc_device *kcs_bmc)
{
	struct kcs_bmc_raw *priv = NULL, *pos;

	spin_lock_irq(&kcs_bmc_raw_instances_lock);
	list_for_each_entry(pos, &kcs_bmc_raw_instances, entry) {
		if (pos->client.dev == kcs_bmc) {
			priv = pos;
			list_del(&pos->entry);
			break;
		}
	}
	spin_unlock_irq(&kcs_bmc_raw_instances_lock);

	if (!priv)
		return -ENODEV;

	misc_deregister(&priv->miscdev);
	kcs_bmc_disable_device(kcs_bmc, &priv->client);
	devm_kfree(priv->client.dev->dev, priv);

	return 0;
}

static const struct kcs_bmc_driver_ops kcs_bmc_raw_driver_ops = {
	.add_device = kcs_bmc_raw_add_device,
	.remove_device = kcs_bmc_raw_remove_device,
};

static struct kcs_bmc_driver kcs_bmc_raw_driver = {
	.ops = &kcs_bmc_raw_driver_ops,
};

static int kcs_bmc_raw_init(void)
{
	kcs_bmc_register_driver(&kcs_bmc_raw_driver);

	return 0;
}
module_init(kcs_bmc_raw_init);

static void kcs_bmc_raw_exit(void)
{
	kcs_bmc_unregister_driver(&kcs_bmc_raw_driver);
}
module_exit(kcs_bmc_raw_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("Character device for raw access to a KCS device");