xref: /openbmc/linux/drivers/staging/greybus/gpio.c (revision a5a8cb96)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * GPIO Greybus driver.
4  *
5  * Copyright 2014 Google Inc.
6  * Copyright 2014 Linaro Ltd.
7  */
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/irq.h>
13 #include <linux/irqdomain.h>
14 #include <linux/gpio/driver.h>
15 #include <linux/mutex.h>
16 #include <linux/greybus.h>
17 
18 #include "gbphy.h"
19 
/*
 * Per-line state cached by the driver; one entry per GPIO line
 * (line numbers run 0..line_max inclusive).
 */
struct gb_gpio_line {
	/* The following has to be an array of line_max entries */
	/* --> make them just a flags field */
	u8			active:    1,
				direction: 1,	/* 0 = output, 1 = input */
				value:     1;	/* 0 = low, 1 = high */
	u16			debounce_usec;	/* last debounce period sent to the module */

	/*
	 * IRQ state staged by the fast irqchip callbacks under irq_lock
	 * and flushed to the module in irq_bus_sync_unlock().
	 */
	u8			irq_type;
	bool			irq_type_pending;
	bool			masked;
	bool			masked_pending;
};
33 
/* Driver state for one greybus GPIO connection. */
struct gb_gpio_controller {
	struct gbphy_device	*gbphy_dev;
	struct gb_connection	*connection;
	u8			line_max;	/* max line number */
	struct gb_gpio_line	*lines;		/* array of line_max + 1 entries */

	struct gpio_chip	chip;
	struct irq_chip		irqc;
	struct mutex		irq_lock;	/* serializes staged irq updates */
};
/* Convert an embedded gpio_chip pointer back to its gb_gpio_controller. */
#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)
/*
 * Fetch the gpio_chip stored as the irq domain's host data.  The
 * argument is parenthesized so the macro stays correct for any
 * expression passed in, not just a plain identifier.
 */
#define irq_data_to_gpio_chip(d) (((d))->domain->host_data)
47 
48 static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
49 {
50 	struct gb_gpio_line_count_response response;
51 	int ret;
52 
53 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
54 				NULL, 0, &response, sizeof(response));
55 	if (!ret)
56 		ggc->line_max = response.count;
57 	return ret;
58 }
59 
60 static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
61 {
62 	struct gb_gpio_activate_request request;
63 	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
64 	int ret;
65 
66 	ret = gbphy_runtime_get_sync(gbphy_dev);
67 	if (ret)
68 		return ret;
69 
70 	request.which = which;
71 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
72 				&request, sizeof(request), NULL, 0);
73 	if (ret) {
74 		gbphy_runtime_put_autosuspend(gbphy_dev);
75 		return ret;
76 	}
77 
78 	ggc->lines[which].active = true;
79 
80 	return 0;
81 }
82 
83 static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
84 					 u8 which)
85 {
86 	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
87 	struct device *dev = &gbphy_dev->dev;
88 	struct gb_gpio_deactivate_request request;
89 	int ret;
90 
91 	request.which = which;
92 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
93 				&request, sizeof(request), NULL, 0);
94 	if (ret) {
95 		dev_err(dev, "failed to deactivate gpio %u\n", which);
96 		goto out_pm_put;
97 	}
98 
99 	ggc->lines[which].active = false;
100 
101 out_pm_put:
102 	gbphy_runtime_put_autosuspend(gbphy_dev);
103 }
104 
105 static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
106 					   u8 which)
107 {
108 	struct device *dev = &ggc->gbphy_dev->dev;
109 	struct gb_gpio_get_direction_request request;
110 	struct gb_gpio_get_direction_response response;
111 	int ret;
112 	u8 direction;
113 
114 	request.which = which;
115 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
116 				&request, sizeof(request),
117 				&response, sizeof(response));
118 	if (ret)
119 		return ret;
120 
121 	direction = response.direction;
122 	if (direction && direction != 1) {
123 		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
124 			 which, direction);
125 	}
126 	ggc->lines[which].direction = direction ? 1 : 0;
127 	return 0;
128 }
129 
130 static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
131 					  u8 which)
132 {
133 	struct gb_gpio_direction_in_request request;
134 	int ret;
135 
136 	request.which = which;
137 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
138 				&request, sizeof(request), NULL, 0);
139 	if (!ret)
140 		ggc->lines[which].direction = 1;
141 	return ret;
142 }
143 
144 static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
145 					   u8 which, bool value_high)
146 {
147 	struct gb_gpio_direction_out_request request;
148 	int ret;
149 
150 	request.which = which;
151 	request.value = value_high ? 1 : 0;
152 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
153 				&request, sizeof(request), NULL, 0);
154 	if (!ret)
155 		ggc->lines[which].direction = 0;
156 	return ret;
157 }
158 
159 static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
160 				       u8 which)
161 {
162 	struct device *dev = &ggc->gbphy_dev->dev;
163 	struct gb_gpio_get_value_request request;
164 	struct gb_gpio_get_value_response response;
165 	int ret;
166 	u8 value;
167 
168 	request.which = which;
169 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
170 				&request, sizeof(request),
171 				&response, sizeof(response));
172 	if (ret) {
173 		dev_err(dev, "failed to get value of gpio %u\n", which);
174 		return ret;
175 	}
176 
177 	value = response.value;
178 	if (value && value != 1) {
179 		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
180 			 which, value);
181 	}
182 	ggc->lines[which].value = value ? 1 : 0;
183 	return 0;
184 }
185 
186 static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
187 					u8 which, bool value_high)
188 {
189 	struct device *dev = &ggc->gbphy_dev->dev;
190 	struct gb_gpio_set_value_request request;
191 	int ret;
192 
193 	if (ggc->lines[which].direction == 1) {
194 		dev_warn(dev, "refusing to set value of input gpio %u\n",
195 			 which);
196 		return;
197 	}
198 
199 	request.which = which;
200 	request.value = value_high ? 1 : 0;
201 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
202 				&request, sizeof(request), NULL, 0);
203 	if (ret) {
204 		dev_err(dev, "failed to set value of gpio %u\n", which);
205 		return;
206 	}
207 
208 	ggc->lines[which].value = request.value;
209 }
210 
211 static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
212 					  u8 which, u16 debounce_usec)
213 {
214 	struct gb_gpio_set_debounce_request request;
215 	int ret;
216 
217 	request.which = which;
218 	request.usec = cpu_to_le16(debounce_usec);
219 	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
220 				&request, sizeof(request), NULL, 0);
221 	if (!ret)
222 		ggc->lines[which].debounce_usec = debounce_usec;
223 	return ret;
224 }
225 
226 static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
227 {
228 	struct device *dev = &ggc->gbphy_dev->dev;
229 	struct gb_gpio_irq_mask_request request;
230 	int ret;
231 
232 	request.which = hwirq;
233 	ret = gb_operation_sync(ggc->connection,
234 				GB_GPIO_TYPE_IRQ_MASK,
235 				&request, sizeof(request), NULL, 0);
236 	if (ret)
237 		dev_err(dev, "failed to mask irq: %d\n", ret);
238 }
239 
240 static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
241 {
242 	struct device *dev = &ggc->gbphy_dev->dev;
243 	struct gb_gpio_irq_unmask_request request;
244 	int ret;
245 
246 	request.which = hwirq;
247 	ret = gb_operation_sync(ggc->connection,
248 				GB_GPIO_TYPE_IRQ_UNMASK,
249 				&request, sizeof(request), NULL, 0);
250 	if (ret)
251 		dev_err(dev, "failed to unmask irq: %d\n", ret);
252 }
253 
254 static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
255 				  u8 hwirq, u8 type)
256 {
257 	struct device *dev = &ggc->gbphy_dev->dev;
258 	struct gb_gpio_irq_type_request request;
259 	int ret;
260 
261 	request.which = hwirq;
262 	request.type = type;
263 
264 	ret = gb_operation_sync(ggc->connection,
265 				GB_GPIO_TYPE_IRQ_TYPE,
266 				&request, sizeof(request), NULL, 0);
267 	if (ret)
268 		dev_err(dev, "failed to set irq type: %d\n", ret);
269 }
270 
271 static void gb_gpio_irq_mask(struct irq_data *d)
272 {
273 	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
274 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
275 	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
276 
277 	line->masked = true;
278 	line->masked_pending = true;
279 }
280 
281 static void gb_gpio_irq_unmask(struct irq_data *d)
282 {
283 	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
284 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
285 	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
286 
287 	line->masked = false;
288 	line->masked_pending = true;
289 }
290 
291 static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
292 {
293 	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
294 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
295 	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
296 	struct device *dev = &ggc->gbphy_dev->dev;
297 	u8 irq_type;
298 
299 	switch (type) {
300 	case IRQ_TYPE_NONE:
301 		irq_type = GB_GPIO_IRQ_TYPE_NONE;
302 		break;
303 	case IRQ_TYPE_EDGE_RISING:
304 		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
305 		break;
306 	case IRQ_TYPE_EDGE_FALLING:
307 		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
308 		break;
309 	case IRQ_TYPE_EDGE_BOTH:
310 		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
311 		break;
312 	case IRQ_TYPE_LEVEL_LOW:
313 		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
314 		break;
315 	case IRQ_TYPE_LEVEL_HIGH:
316 		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
317 		break;
318 	default:
319 		dev_err(dev, "unsupported irq type: %u\n", type);
320 		return -EINVAL;
321 	}
322 
323 	line->irq_type = irq_type;
324 	line->irq_type_pending = true;
325 
326 	return 0;
327 }
328 
329 static void gb_gpio_irq_bus_lock(struct irq_data *d)
330 {
331 	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
332 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
333 
334 	mutex_lock(&ggc->irq_lock);
335 }
336 
337 static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
338 {
339 	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
340 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
341 	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
342 
343 	if (line->irq_type_pending) {
344 		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
345 		line->irq_type_pending = false;
346 	}
347 
348 	if (line->masked_pending) {
349 		if (line->masked)
350 			_gb_gpio_irq_mask(ggc, d->hwirq);
351 		else
352 			_gb_gpio_irq_unmask(ggc, d->hwirq);
353 		line->masked_pending = false;
354 	}
355 
356 	mutex_unlock(&ggc->irq_lock);
357 }
358 
/*
 * Handler for unsolicited requests arriving from the module.  Only
 * GB_GPIO_TYPE_IRQ_EVENT is expected: validate the payload, map the
 * hardware line number to its Linux irq and dispatch the handler.
 * Returns 0 on success or -EINVAL for malformed/unexpected requests.
 */
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq;
	struct irq_desc *desc;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	/* Reject short messages before touching the payload. */
	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	/* Valid line numbers run 0..line_max inclusive. */
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->chip.irq.domain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}
	desc = irq_to_desc(irq);
	if (!desc) {
		dev_err(dev, "failed to look up irq\n");
		return -EINVAL;
	}

	/*
	 * Dispatch with local interrupts disabled, mimicking the
	 * hard-irq context the flow handler expects.
	 */
	local_irq_disable();
	generic_handle_irq_desc(desc);
	local_irq_enable();

	return 0;
}
406 
407 static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
408 {
409 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
410 
411 	return gb_gpio_activate_operation(ggc, (u8)offset);
412 }
413 
414 static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
415 {
416 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
417 
418 	gb_gpio_deactivate_operation(ggc, (u8)offset);
419 }
420 
421 static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
422 {
423 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
424 	u8 which;
425 	int ret;
426 
427 	which = (u8)offset;
428 	ret = gb_gpio_get_direction_operation(ggc, which);
429 	if (ret)
430 		return ret;
431 
432 	return ggc->lines[which].direction ? 1 : 0;
433 }
434 
435 static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
436 {
437 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
438 
439 	return gb_gpio_direction_in_operation(ggc, (u8)offset);
440 }
441 
442 static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
443 				    int value)
444 {
445 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
446 
447 	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
448 }
449 
450 static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
451 {
452 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
453 	u8 which;
454 	int ret;
455 
456 	which = (u8)offset;
457 	ret = gb_gpio_get_value_operation(ggc, which);
458 	if (ret)
459 		return ret;
460 
461 	return ggc->lines[which].value;
462 }
463 
464 static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
465 {
466 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
467 
468 	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
469 }
470 
471 static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
472 			      unsigned long config)
473 {
474 	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
475 	u32 debounce;
476 
477 	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
478 		return -ENOTSUPP;
479 
480 	debounce = pinconf_to_config_argument(config);
481 	if (debounce > U16_MAX)
482 		return -EINVAL;
483 
484 	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
485 }
486 
487 static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
488 {
489 	int ret;
490 
491 	/* Now find out how many lines there are */
492 	ret = gb_gpio_line_count_operation(ggc);
493 	if (ret)
494 		return ret;
495 
496 	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
497 			     GFP_KERNEL);
498 	if (!ggc->lines)
499 		return -ENOMEM;
500 
501 	return ret;
502 }
503 
/*
 * Probe one greybus GPIO bundle: create the connection, enable only
 * transmit to discover the line count, then register a gpio_chip and
 * its irqchip before enabling receive of unsolicited irq events.
 * Error unwinding is in strict reverse order of setup.
 */
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection =
		gb_connection_create(gbphy_dev->bundle,
				     le16_to_cpu(gbphy_dev->cport_desc->id),
				     gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	/*
	 * Enable only the transmit path first: line discovery needs
	 * outgoing operations, but incoming irq events must not arrive
	 * before the irqchip is registered.
	 */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	/* Mask/unmask/set_type are staged; bus_lock/unlock flush them. */
	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_config = gb_gpio_set_config;
	gpio->base = -1;		/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;		/* every op is a blocking greybus call */

	/* Enable receive as well; unsolicited irq events may now arrive. */
	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_line_free;
	}

	ret = gpiochip_irqchip_add(gpio, irqc, 0, handle_level_irq,
				   IRQ_TYPE_NONE);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
		goto exit_gpiochip_remove;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_gpiochip_remove:
	gpiochip_remove(gpio);
exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}
599 
/*
 * Tear down one GPIO bundle.  Receive is disabled before the gpiochip
 * is removed so no incoming irq event races irq-domain teardown; the
 * connection is then fully disabled and destroyed.
 */
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	/* Balance probe's final put; proceed with teardown regardless. */
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}
617 
/* Bind to any gbphy device advertising the greybus GPIO protocol. */
static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
623 
/* gbphy driver glue; registration is handled by module_gbphy_driver(). */
static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");
633