// SPDX-License-Identifier: GPL-2.0
/*
 * dim2.c - MediaLB DIM2 Hardware Dependent Module
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/most.h>
#include "hal.h"
#include "errors.h"
#include "sysfs.h"

#define DMA_CHANNELS (32 - 1)  /* channel 0 is a system channel */

#define MAX_BUFFERS_PACKET      32
#define MAX_BUFFERS_STREAMING   32
#define MAX_BUF_SIZE_PACKET     2048
#define MAX_BUF_SIZE_STREAMING  (8 * 1024)
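
/*
 * Note on channel addressing: HDM channel i is bound to MediaLB channel
 * address i * 2 + 2 (see the ch_addr computation in configure_channel() and
 * the "ca%d" names set in dim2_probe()), i.e. only even, non-system channel
 * addresses are used.
 */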

/*
 * The parameter representing the number of frames per sub-buffer for
 * synchronous channels.  Valid values: [0 .. 6].
 *
 * The values 0 .. 6 select 1, 2, 4, 8, 16, 32 and 64 frames per sub-buffer,
 * respectively.
 */
static u8 fcnt = 4;  /* (1 << fcnt) frames per subbuffer */
module_param(fcnt, byte, 0000);
MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
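
/*
 * Example: the default fcnt = 4 selects 1 << 4 = 16 frames per sub-buffer.
 * A hypothetical load-time override (the module name here is an assumption):
 *
 *	modprobe most_dim2 fcnt=5	(32 frames per sub-buffer)
 *
 * The 0000 permissions mean the parameter gets no sysfs entry and cannot be
 * changed at runtime.
 */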

static DEFINE_SPINLOCK(dim_lock);

static void dim2_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
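
/*
 * Note: a single, statically declared tasklet is shared here; its data
 * pointer is set to the device context in dim2_ahb_isr() right before
 * scheduling, which appears to assume a single DIM2 instance per system.
 */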

/**
 * struct hdm_channel - private structure to keep channel specific data
 * @name: channel name
 * @is_initialized: identifier to know whether the channel is initialized
 * @ch: HAL specific channel data
 * @reset_dbr_size: address of the dbr_size property to reset on
 *                  poison_channel(), or NULL if the value was set by the user
 * @pending_list: list to keep MBOs before starting transfer
 * @started_list: list to keep MBOs after starting transfer
 * @direction: channel direction (TX or RX)
 * @data_type: channel data type
 */
struct hdm_channel {
	char name[sizeof "caNNN"];
	bool is_initialized;
	struct dim_channel ch;
	u16 *reset_dbr_size;
	struct list_head pending_list;	/* before dim_enqueue_buffer() */
	struct list_head started_list;	/* after dim_enqueue_buffer() */
	enum most_channel_direction direction;
	enum most_channel_data_type data_type;
};

/**
 * struct dim2_hdm - private structure to keep interface specific data
 * @dev: device used to expose the sysfs state attribute
 * @hch: an array of channel specific data
 * @capabilities: an array of channel capability data
 * @most_iface: most interface structure
 * @name: interface name
 * @io_base: I/O register base address
 * @clk_speed: configured MediaLB clock speed (one of the CLK_{NUMBER}FS values)
 * @clk: main clock
 * @clk_pll: PLL clock, used for clock speeds of 2048fs and above
 * @netinfo_task: thread to deliver network status
 * @netinfo_waitq: waitq for the thread to sleep
 * @deliver_netinfo: to identify whether network status received
 * @mac_addrs: INIC mac address
 * @link_state: network link state
 * @atx_idx: index of async tx channel
 * @bus: medialb bus state used by the HAL
 * @on_netinfo: callback to deliver network status to mostcore
 * @disable_platform: platform specific disable hook, called on remove and
 *                    on probe failure
 */
struct dim2_hdm {
	struct device dev;
	struct hdm_channel hch[DMA_CHANNELS];
	struct most_channel_capability capabilities[DMA_CHANNELS];
	struct most_interface most_iface;
	char name[16 + sizeof "dim2-"];
	void __iomem *io_base;
	u8 clk_speed;
	struct clk *clk;
	struct clk *clk_pll;
	struct task_struct *netinfo_task;
	wait_queue_head_t netinfo_waitq;
	int deliver_netinfo;
	unsigned char mac_addrs[6];
	unsigned char link_state;
	int atx_idx;
	struct medialb_bus bus;
	void (*on_netinfo)(struct most_interface *most_iface,
			   unsigned char link_state, unsigned char *addrs);
	void (*disable_platform)(struct platform_device *);
};

struct dim2_platform_data {
	int (*enable)(struct platform_device *);
	void (*disable)(struct platform_device *);
};

#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)

/* Macro to identify a network status message */
#define PACKET_IS_NET_INFO(p)  \
	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
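
/*
 * The magic offsets and values above are assumed to match the header of the
 * INIC's network status message as it arrives on the asynchronous RX
 * channel; service_done_flag() uses this check to divert such packets to
 * retrieve_netinfo() instead of completing them to mostcore.
 */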

bool dim2_sysfs_get_state_cb(void)
{
	bool state;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	state = dim_get_lock_state();
	spin_unlock_irqrestore(&dim_lock, flags);

	return state;
}

/**
 * dimcb_on_error - callback from HAL to report miscommunication between
 * HDM and HAL
 * @error_id: Error ID
 * @error_message: Error message. Some text in a free format
 */
void dimcb_on_error(u8 error_id, const char *error_message)
{
	pr_err("%s: error_id - %d, error_message - %s\n", __func__, error_id,
	       error_message);
}

/**
 * try_start_dim_transfer - try to transfer a buffer on a channel
 * @hdm_ch: channel specific data
 *
 * Transfer a buffer from pending_list if the channel is ready
 */
static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
{
	u16 buf_size;
	struct list_head *head = &hdm_ch->pending_list;
	struct mbo *mbo;
	unsigned long flags;
	struct dim_ch_state_t st;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);
	if (list_empty(head)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	mbo = list_first_entry(head, struct mbo, list);
	buf_size = mbo->buffer_length;

	if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	BUG_ON(mbo->bus_address == 0);
	if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);
		mbo->processed_length = 0;
		mbo->status = MBO_E_INVAL;
		mbo->complete(mbo);
		return -EFAULT;
	}

	list_move_tail(head->next, &hdm_ch->started_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	return 0;
}

/**
 * deliver_netinfo_thread - thread to deliver network status to mostcore
 * @data: private data
 *
 * Wait for network status and deliver it to mostcore once it is received
 */
static int deliver_netinfo_thread(void *data)
{
	struct dim2_hdm *dev = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->netinfo_waitq,
					 dev->deliver_netinfo ||
					 kthread_should_stop());

		if (dev->deliver_netinfo) {
			dev->deliver_netinfo--;
			if (dev->on_netinfo) {
				dev->on_netinfo(&dev->most_iface,
						dev->link_state,
						dev->mac_addrs);
			}
		}
	}

	return 0;
}

/**
 * retrieve_netinfo - retrieve network status from received buffer
 * @dev: private data
 * @mbo: received MBO
 *
 * Parse the message in the buffer to get the node address, link state and
 * MAC address, then wake up the thread that delivers this status to mostcore.
 */
static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
{
	u8 *data = mbo->virt_address;

	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
	dev->link_state = data[18];
	pr_info("NIState: %d\n", dev->link_state);
	memcpy(dev->mac_addrs, data + 19, 6);
	dev->deliver_netinfo++;
	wake_up_interruptible(&dev->netinfo_waitq);
}
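
/*
 * Offsets parsed above, for orientation (the field names follow the INIC
 * documentation and are an assumption here): bytes 16-17 carry the
 * big-endian node address, byte 18 the NIState/link state, and bytes 19-24
 * the MAC address.
 */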

/**
 * service_done_flag - handle completed buffers
 * @dev: private data
 * @ch_idx: channel index
 *
 * Hand the completed buffers back to mostcore using the completion callback
 */
static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
{
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	struct dim_ch_state_t st;
	struct list_head *head;
	struct mbo *mbo;
	int done_buffers;
	unsigned long flags;
	u8 *data;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);

	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
	if (!done_buffers) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}

	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dim_lock, flags);

	head = &hdm_ch->started_list;

	while (done_buffers) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		data = mbo->virt_address;

		if (hdm_ch->data_type == MOST_CH_ASYNC &&
		    hdm_ch->direction == MOST_CH_RX &&
		    PACKET_IS_NET_INFO(data)) {
			retrieve_netinfo(dev, mbo);

			spin_lock_irqsave(&dim_lock, flags);
			list_add_tail(&mbo->list, &hdm_ch->pending_list);
			spin_unlock_irqrestore(&dim_lock, flags);
		} else {
			if (hdm_ch->data_type == MOST_CH_CONTROL ||
			    hdm_ch->data_type == MOST_CH_ASYNC) {
				u32 const data_size =
					(u32)data[0] * 256 + data[1] + 2;

				mbo->processed_length =
					min_t(u32, data_size,
					      mbo->buffer_length);
			} else {
				mbo->processed_length = mbo->buffer_length;
			}
			mbo->status = MBO_SUCCESS;
			mbo->complete(mbo);
		}

		done_buffers--;
	}
}

static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
						struct dim_channel **buffer)
{
	int idx = 0;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (dev->hch[ch_idx].is_initialized)
			buffer[idx++] = &dev->hch[ch_idx].ch;
	}
	buffer[idx++] = NULL;

	return buffer;
}

static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_mlb_int_irq();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
		while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
			continue;

	return IRQ_HANDLED;
}

/**
 * dim2_tasklet_fn - tasklet function
 * @data: private data
 *
 * Service each initialized channel, if needed
 */
static void dim2_tasklet_fn(unsigned long data)
{
	struct dim2_hdm *dev = (struct dim2_hdm *)data;
	unsigned long flags;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (!dev->hch[ch_idx].is_initialized)
			continue;

		spin_lock_irqsave(&dim_lock, flags);
		dim_service_channel(&dev->hch[ch_idx].ch);
		spin_unlock_irqrestore(&dim_lock, flags);

		service_done_flag(dev, ch_idx);
		while (!try_start_dim_transfer(dev->hch + ch_idx))
			continue;
	}
}

/**
 * dim2_ahb_isr - interrupt service routine
 * @irq: irq number
 * @_dev: private data
 *
 * Acknowledge the interrupt and schedule a tasklet to service channels.
 * Return IRQ_HANDLED.
 */
static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	struct dim_channel *buffer[DMA_CHANNELS + 1];
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_ahb_int_irq(get_active_channels(dev, buffer));
	spin_unlock_irqrestore(&dim_lock, flags);

	dim2_tasklet.data = (unsigned long)dev;
	tasklet_schedule(&dim2_tasklet);
	return IRQ_HANDLED;
}

/**
 * complete_all_mbos - complete MBOs in a list
 * @head: list head
 *
 * Delete all entries from the list and hand the MBOs back to mostcore using
 * the completion callback.
 */
static void complete_all_mbos(struct list_head *head)
{
	unsigned long flags;
	struct mbo *mbo;

	for (;;) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		mbo->processed_length = 0;
		mbo->status = MBO_E_CLOSE;
		mbo->complete(mbo);
	}
}

/**
 * configure_channel - initialize a channel
 * @most_iface: interface the channel belongs to
 * @ch_idx: index of the channel to be configured
 * @ccfg: structure that holds the configuration information
 *
 * Receives configuration information from mostcore and initializes the
 * corresponding channel. Return 0 on success, negative on failure.
 */
static int configure_channel(struct most_interface *most_iface, int ch_idx,
			     struct most_channel_config *ccfg)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	bool const is_tx = ccfg->direction == MOST_CH_TX;
	u16 const sub_size = ccfg->subbuffer_size;
	u16 const buf_size = ccfg->buffer_size;
	u16 new_size;
	unsigned long flags;
	u8 hal_ret;
	int const ch_addr = ch_idx * 2 + 2;
	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (hdm_ch->is_initialized)
		return -EPERM;

	/* do not reset if the property was set by user, see poison_channel */
	hdm_ch->reset_dbr_size = ccfg->dbr_size ? NULL : &ccfg->dbr_size;

	/* zero value is default dbr_size, see dim2 hal */
	hdm_ch->ch.dbr_size = ccfg->dbr_size;

	switch (ccfg->data_type) {
	case MOST_CH_CONTROL:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: too small buffer size\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
					   is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ASYNC:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: too small buffer size\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
					 is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ISOC:
		new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	case MOST_CH_SYNC:
		new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	default:
		pr_err("%s: configure failed, bad channel type: %d\n",
		       hdm_ch->name, ccfg->data_type);
		return -EINVAL;
	}

	if (hal_ret != DIM_NO_ERROR) {
		spin_unlock_irqrestore(&dim_lock, flags);
		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
		return -ENODEV;
	}

	hdm_ch->data_type = ccfg->data_type;
	hdm_ch->direction = ccfg->direction;
	hdm_ch->is_initialized = true;

	if (hdm_ch->data_type == MOST_CH_ASYNC &&
	    hdm_ch->direction == MOST_CH_TX &&
	    dev->atx_idx < 0)
		dev->atx_idx = ch_idx;

	spin_unlock_irqrestore(&dim_lock, flags);
	ccfg->dbr_size = hdm_ch->ch.dbr_size;

	return 0;
}

/**
 * enqueue - enqueue a buffer for data transfer
 * @most_iface: intended interface
 * @ch_idx: index of the channel the buffer is intended for
 * @mbo: pointer to the buffer object
 *
 * Push the buffer onto pending_list and try to transfer one buffer from
 * pending_list. Return 0 on success, negative on failure.
 */
static int enqueue(struct most_interface *most_iface, int ch_idx,
		   struct mbo *mbo)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	if (mbo->bus_address == 0)
		return -EFAULT;

	spin_lock_irqsave(&dim_lock, flags);
	list_add_tail(&mbo->list, &hdm_ch->pending_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	(void)try_start_dim_transfer(hdm_ch);

	return 0;
}

/**
 * request_netinfo - triggers retrieving of network info
 * @most_iface: pointer to the interface
 * @ch_idx: corresponding channel ID
 * @on_netinfo: callback to deliver the network status to mostcore
 *
 * Send a command to INIC that triggers retrieving of network info by means of
 * "Message exchange over MDP/MEP".
 */
static void request_netinfo(struct most_interface *most_iface, int ch_idx,
			    void (*on_netinfo)(struct most_interface *,
					       unsigned char, unsigned char *))
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct mbo *mbo;
	u8 *data;

	dev->on_netinfo = on_netinfo;
	if (!on_netinfo)
		return;

	if (dev->atx_idx < 0) {
		pr_err("async tx channel not initialized\n");
		return;
	}

	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
	if (!mbo)
		return;

	mbo->buffer_length = 5;

	data = mbo->virt_address;

	data[0] = 0x00; /* PML High byte */
	data[1] = 0x03; /* PML Low byte */
	data[2] = 0x02; /* PMHL */
	data[3] = 0x08; /* FPH */
	data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */

	most_submit_mbo(mbo);
}

/**
 * poison_channel - poison buffers of a channel
 * @most_iface: pointer to the interface the channel to be poisoned belongs to
 * @ch_idx: corresponding channel ID
 *
 * Destroy a channel and complete all the buffers in both started_list &
 * pending_list. Return 0 on success, negative on failure.
 */
static int poison_channel(struct most_interface *most_iface, int ch_idx)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;
	u8 hal_ret;
	int ret = 0;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	tasklet_disable(&dim2_tasklet);
	spin_lock_irqsave(&dim_lock, flags);
	hal_ret = dim_destroy_channel(&hdm_ch->ch);
	hdm_ch->is_initialized = false;
	if (ch_idx == dev->atx_idx)
		dev->atx_idx = -1;
	spin_unlock_irqrestore(&dim_lock, flags);
	tasklet_enable(&dim2_tasklet);
	if (hal_ret != DIM_NO_ERROR) {
		pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
		ret = -EFAULT;
	}

	complete_all_mbos(&hdm_ch->started_list);
	complete_all_mbos(&hdm_ch->pending_list);
	if (hdm_ch->reset_dbr_size)
		*hdm_ch->reset_dbr_size = 0;

	return ret;
}

static void *dma_alloc(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	return dma_alloc_coherent(dev, size, &mbo->bus_address, GFP_KERNEL);
}

static void dma_free(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
}
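
/*
 * Buffers come from coherent DMA memory, so no explicit cache
 * synchronization is needed between the CPU and the DIM2 DMA engine;
 * mbo->bus_address is the device-visible address that
 * try_start_dim_transfer() hands to the HAL.
 */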

static const struct of_device_id dim2_of_match[];

static struct {
	const char *clock_speed;
	u8 clk_speed;
} clk_mt[] = {
	{ "256fs", CLK_256FS },
	{ "512fs", CLK_512FS },
	{ "1024fs", CLK_1024FS },
	{ "2048fs", CLK_2048FS },
	{ "3072fs", CLK_3072FS },
	{ "4096fs", CLK_4096FS },
	{ "6144fs", CLK_6144FS },
	{ "8192fs", CLK_8192FS },
};

/**
 * get_dim2_clk_speed - converts string to DIM2 clock speed value
 *
 * @clock_speed: string in the format "{NUMBER}fs"
 * @val: pointer to get one of the CLK_{NUMBER}FS values
 *
 * On success stores one of the CLK_{NUMBER}FS values in *val and returns 0,
 * otherwise returns -EINVAL.
 */
static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_mt); i++) {
		if (!strcmp(clock_speed, clk_mt[i].clock_speed)) {
			*val = clk_mt[i].clk_speed;
			return 0;
		}
	}
	return -EINVAL;
}
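
/*
 * The accepted strings come from the "microchip,clock-speed" devicetree
 * property read in dim2_probe(), e.g. (sketch):
 *
 *	microchip,clock-speed = "1024fs";
 */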

/*
 * dim2_probe - dim2 probe handler
 * @pdev: platform device structure
 *
 * Register the dim2 interface with mostcore and initialize it.
 * Return 0 on success, negative on failure.
 */
static int dim2_probe(struct platform_device *pdev)
{
	const struct dim2_platform_data *pdata;
	const struct of_device_id *of_id;
	const char *clock_speed;
	struct dim2_hdm *dev;
	struct resource *res;
	int ret, i;
	u8 hal_ret;
	int irq;

	enum { MLB_INT_IDX, AHB0_INT_IDX };

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->atx_idx = -1;

	platform_set_drvdata(pdev, dev);

	ret = of_property_read_string(pdev->dev.of_node,
				      "microchip,clock-speed", &clock_speed);
	if (ret) {
		dev_err(&pdev->dev, "missing dt property clock-speed\n");
		return ret;
	}

	ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
	if (ret) {
		dev_err(&pdev->dev, "bad dt property clock-speed\n");
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->io_base))
		return PTR_ERR(dev->io_base);

	of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
	pdata = of_id->data;
	ret = pdata && pdata->enable ? pdata->enable(pdev) : 0;
	if (ret)
		return ret;

	dev->disable_platform = pdata ? pdata->disable : NULL;

	dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n", fcnt);
	hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
	if (hal_ret != DIM_NO_ERROR) {
		dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
		ret = -ENODEV;
		goto err_disable_platform;
	}

	irq = platform_get_irq(pdev, AHB0_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
			       "dim2_ahb0_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	irq = platform_get_irq(pdev, MLB_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
			       "dim2_mlb_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	init_waitqueue_head(&dev->netinfo_waitq);
	dev->deliver_netinfo = 0;
	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, dev,
					"dim2_netinfo");
	if (IS_ERR(dev->netinfo_task)) {
		ret = PTR_ERR(dev->netinfo_task);
		goto err_shutdown_dim;
	}

	for (i = 0; i < DMA_CHANNELS; i++) {
		struct most_channel_capability *cap = dev->capabilities + i;
		struct hdm_channel *hdm_ch = dev->hch + i;

		INIT_LIST_HEAD(&hdm_ch->pending_list);
		INIT_LIST_HEAD(&hdm_ch->started_list);
		hdm_ch->is_initialized = false;
		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);

		cap->name_suffix = hdm_ch->name;
		cap->direction = MOST_CH_RX | MOST_CH_TX;
		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				 MOST_CH_ISOC | MOST_CH_SYNC;
		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
	}

	{
		const char *fmt;

		if (sizeof(res->start) == sizeof(long long))
			fmt = "dim2-%016llx";
		else if (sizeof(res->start) == sizeof(long))
			fmt = "dim2-%016lx";
		else
			fmt = "dim2-%016x";

		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
	}

	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
	dev->most_iface.description = dev->name;
	dev->most_iface.num_channels = DMA_CHANNELS;
	dev->most_iface.channel_vector = dev->capabilities;
	dev->most_iface.configure = configure_channel;
	dev->most_iface.enqueue = enqueue;
	dev->most_iface.dma_alloc = dma_alloc;
	dev->most_iface.dma_free = dma_free;
	dev->most_iface.poison_channel = poison_channel;
	dev->most_iface.request_netinfo = request_netinfo;
	dev->most_iface.driver_dev = &pdev->dev;
	dev->most_iface.dev = &dev->dev;
	dev->dev.init_name = "dim2_state";
	dev->dev.parent = &pdev->dev;

	ret = most_register_interface(&dev->most_iface);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MOST interface\n");
		goto err_stop_thread;
	}

	ret = dim2_sysfs_probe(&dev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to create sysfs attribute\n");
		goto err_unreg_iface;
	}

	return 0;

err_unreg_iface:
	most_deregister_interface(&dev->most_iface);
err_stop_thread:
	kthread_stop(dev->netinfo_task);
err_shutdown_dim:
	dim_shutdown();
err_disable_platform:
	if (dev->disable_platform)
		dev->disable_platform(pdev);

	return ret;
}

/**
 * dim2_remove - dim2 remove handler
 * @pdev: platform device structure
 *
 * Unregister the interface from mostcore
 */
static int dim2_remove(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	unsigned long flags;

	dim2_sysfs_destroy(&dev->dev);
	most_deregister_interface(&dev->most_iface);
	kthread_stop(dev->netinfo_task);

	spin_lock_irqsave(&dim_lock, flags);
	dim_shutdown();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->disable_platform)
		dev->disable_platform(pdev);

	return 0;
}

/* platform specific functions [[ */

static int fsl_mx6_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, "mlb");
	if (IS_ERR_OR_NULL(dev->clk)) {
		dev_err(&pdev->dev, "unable to get mlb clock\n");
		return -EFAULT;
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed\n");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable pll */
		dev->clk_pll = devm_clk_get(&pdev->dev, "pll8_mlb");
		if (IS_ERR_OR_NULL(dev->clk_pll)) {
			dev_err(&pdev->dev, "unable to get mlb pll clock\n");
			clk_disable_unprepare(dev->clk);
			return -EFAULT;
		}

		writel(0x888, dev->io_base + 0x38);
		clk_prepare_enable(dev->clk_pll);
	}

	return 0;
}

static void fsl_mx6_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	if (dev->clk_speed >= CLK_2048FS)
		clk_disable_unprepare(dev->clk_pll);

	clk_disable_unprepare(dev->clk);
}

static int rcar_h2_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed\n");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable MLP pll and LVDS drivers */
		writel(0x03, dev->io_base + 0x600);
		/* set bias */
		writel(0x888, dev->io_base + 0x38);
	} else {
		/* PLL */
		writel(0x04, dev->io_base + 0x600);
	}

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_h2_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

static int rcar_m3_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	u32 enable_512fs = dev->clk_speed == CLK_512FS;
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed\n");
		return ret;
	}

	/* PLL */
	writel(0x04, dev->io_base + 0x600);

	writel(enable_512fs, dev->io_base + 0x604);

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_m3_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

/* ]] platform specific functions */

enum dim2_platforms { FSL_MX6, RCAR_H2, RCAR_M3 };

static struct dim2_platform_data plat_data[] = {
	[FSL_MX6] = { .enable = fsl_mx6_enable, .disable = fsl_mx6_disable },
	[RCAR_H2] = { .enable = rcar_h2_enable, .disable = rcar_h2_disable },
	[RCAR_M3] = { .enable = rcar_m3_enable, .disable = rcar_m3_disable },
};

static const struct of_device_id dim2_of_match[] = {
	{
		.compatible = "fsl,imx6q-mlb150",
		.data = plat_data + FSL_MX6
	},
	{
		.compatible = "renesas,mlp",
		.data = plat_data + RCAR_H2
	},
	{
		.compatible = "rcar,medialb-dim2",
		.data = plat_data + RCAR_M3
	},
	{
		.compatible = "xlnx,axi4-os62420_3pin-1.00.a",
	},
	{
		.compatible = "xlnx,axi4-os62420_6pin-1.00.a",
	},
	{},
};
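
/*
 * A minimal, hypothetical devicetree node sketch for this driver (address
 * and interrupt numbers are examples, not taken from a real board; resource
 * 0 is the register block, irq index 0 is mlb_int and index 1 is ahb0_int,
 * as consumed by dim2_probe()):
 *
 *	mlb@ffd91000 {
 *		compatible = "renesas,mlp";
 *		reg = <0xffd91000 0x1000>;
 *		interrupts = <0 384 4>, <0 385 4>;
 *		microchip,clock-speed = "1024fs";
 *	};
 */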

MODULE_DEVICE_TABLE(of, dim2_of_match);

static struct platform_driver dim2_driver = {
	.probe = dim2_probe,
	.remove = dim2_remove,
	.driver = {
		.name = "hdm_dim2",
		.of_match_table = dim2_of_match,
	},
};

module_platform_driver(dim2_driver);

MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
MODULE_LICENSE("GPL");