// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Cadence MIPI-CSI2 RX Controller v1.3
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#define CSI2RX_DEVICE_CFG_REG			0x000

#define CSI2RX_SOFT_RESET_REG			0x004
#define CSI2RX_SOFT_RESET_PROTOCOL			BIT(1)
#define CSI2RX_SOFT_RESET_FRONT				BIT(0)

#define CSI2RX_STATIC_CFG_REG			0x008
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK			GENMASK(11, 8)

#define CSI2RX_DPHY_LANE_CTRL_REG		0x40
#define CSI2RX_DPHY_CL_RST			BIT(16)
#define CSI2RX_DPHY_DL_RST(i)			BIT((i) + 12)
#define CSI2RX_DPHY_CL_EN			BIT(4)
#define CSI2RX_DPHY_DL_EN(i)			BIT(i)

#define CSI2RX_STREAM_BASE(n)		(((n) + 1) * 0x100)

#define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
#define CSI2RX_STREAM_CTRL_START			BIT(0)

#define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT		BIT(31)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)		BIT((n) + 16)

#define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF		(1 << 8)

#define CSI2RX_LANES_MAX	4
#define CSI2RX_STREAMS_MAX	4

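/*
 * One sink pad receives the CSI-2 link, and one source pad is exposed per
 * output stream (up to CSI2RX_STREAMS_MAX).
 */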
enum csi2rx_pads {
	CSI2RX_PAD_SINK,
	CSI2RX_PAD_SOURCE_STREAM0,
	CSI2RX_PAD_SOURCE_STREAM1,
	CSI2RX_PAD_SOURCE_STREAM2,
	CSI2RX_PAD_SOURCE_STREAM3,
	CSI2RX_PAD_MAX,
};

struct csi2rx_priv {
	struct device			*dev;
	unsigned int			count;

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex			lock;

	void __iomem			*base;
	struct clk			*sys_clk;
	struct clk			*p_clk;
	struct clk			*pixel_clk[CSI2RX_STREAMS_MAX];
	struct reset_control		*sys_rst;
	struct reset_control		*p_rst;
	struct reset_control		*pixel_rst[CSI2RX_STREAMS_MAX];
	struct phy			*dphy;

	u8				lanes[CSI2RX_LANES_MAX];
	u8				num_lanes;
	u8				max_lanes;
	u8				max_streams;
	bool				has_internal_dphy;

	struct v4l2_subdev		subdev;
	struct v4l2_async_notifier	notifier;
	struct media_pad		pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev		*source_subdev;
	int				source_pad;
};

static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}

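/* Pulse the protocol and front-end soft-reset bits to reset the controller. */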
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
	writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
	       csi2rx->base + CSI2RX_SOFT_RESET_REG);

	udelay(10);

	writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
}

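/*
 * Power on and configure the external D-PHY. The configure options are left
 * zeroed here, so the PHY driver applies its own defaults.
 */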
static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
	union phy_configure_opts opts = { };
	int ret;

	ret = phy_power_on(csi2rx->dphy);
	if (ret)
		return ret;

	ret = phy_configure(csi2rx->dphy, &opts);
	if (ret) {
		phy_power_off(csi2rx->dphy);
		return ret;
	}

	return 0;
}

static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	unsigned long lanes_used = 0;
	u32 reg;
	int ret;

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret)
		return ret;

	reset_control_deassert(csi2rx->p_rst);
	csi2rx_reset(csi2rx);

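	/*
	 * The STATIC_CFG lanes field (bits 11:8) holds the number of active
	 * lanes, while the DLANE_MAP fields map each logical lane to a
	 * physical one.
	 */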
	reg = csi2rx->num_lanes << 8;
	for (i = 0; i < csi2rx->num_lanes; i++) {
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
		set_bit(csi2rx->lanes[i], &lanes_used);
	}

	/*
	 * Even the unused lanes need to be mapped. To avoid mapping a
	 * physical lane twice, remember the lanes used in the previous
	 * loop, and only map the remaining physical lanes to the rest
	 * of our logical lanes.
	 */
	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
		unsigned int idx = find_first_zero_bit(&lanes_used,
						       csi2rx->max_lanes);
		set_bit(idx, &lanes_used);
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
	}

	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);

	/* Enable DPHY clk and data lanes. */
	if (csi2rx->dphy) {
		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
		for (i = 0; i < csi2rx->num_lanes; i++) {
			reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
			reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
		}

		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		ret = csi2rx_configure_ext_dphy(csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Failed to configure external DPHY: %d\n", ret);
			goto err_disable_pclk;
		}
	}

	/*
	 * Create a static mapping between the CSI virtual channels
	 * and the output streams.
	 *
	 * This should be enhanced, but v4l2 lacks the support for
	 * changing that mapping dynamically.
	 *
	 * We also cannot enable and disable independent streams here,
	 * hence the reference counting.
	 */
	for (i = 0; i < csi2rx->max_streams; i++) {
		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
		if (ret)
			goto err_disable_pixclk;

		reset_control_deassert(csi2rx->pixel_rst[i]);

		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));

		writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
		       CSI2RX_STREAM_DATA_CFG_VC_SELECT(i),
		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));

		writel(CSI2RX_STREAM_CTRL_START,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

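	/*
	 * With all streams configured and started, enable the main logic
	 * clock and release its reset before starting the remote source.
	 */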
	ret = clk_prepare_enable(csi2rx->sys_clk);
	if (ret)
		goto err_disable_pixclk;

	reset_control_deassert(csi2rx->sys_rst);

	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
	if (ret)
		goto err_disable_sysclk;

	clk_disable_unprepare(csi2rx->p_clk);

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
	for (; i > 0; i--) {
		reset_control_assert(csi2rx->pixel_rst[i - 1]);
		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
	}

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
		phy_power_off(csi2rx->dphy);
	}
err_disable_pclk:
	clk_disable_unprepare(csi2rx->p_clk);

	return ret;
}

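/*
 * Mirror of csi2rx_start(): stop the streams, assert the resets, gate the
 * clocks, stop the remote source and power off the external D-PHY if one is
 * in use. The register bank clock is re-enabled around the register writes.
 */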
static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
	unsigned int i;

	clk_prepare_enable(csi2rx->p_clk);
	reset_control_assert(csi2rx->sys_rst);
	clk_disable_unprepare(csi2rx->sys_clk);

	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));

		reset_control_assert(csi2rx->pixel_rst[i]);
		clk_disable_unprepare(csi2rx->pixel_clk[i]);
	}

	reset_control_assert(csi2rx->p_rst);
	clk_disable_unprepare(csi2rx->p_clk);

	if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
		dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		if (phy_power_off(csi2rx->dphy))
			dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
	}
}

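/*
 * Reference-counted stream control: only the first enable brings the
 * controller up, and only the last disable shuts it down.
 */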
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
	int ret = 0;

	mutex_lock(&csi2rx->lock);

	if (enable) {
		/*
		 * If we're not the first user, there's no need to
		 * enable the whole controller.
		 */
		if (!csi2rx->count) {
			ret = csi2rx_start(csi2rx);
			if (ret)
				goto out;
		}

		csi2rx->count++;
	} else {
		csi2rx->count--;

		/*
		 * Let the last user turn off the lights.
		 */
		if (!csi2rx->count)
			csi2rx_stop(csi2rx);
	}

out:
	mutex_unlock(&csi2rx->lock);
	return ret;
}

static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
	.s_stream	= csi2rx_s_stream,
};

static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
	.video		= &csi2rx_video_ops,
};

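/*
 * Called when the remote source subdev is bound: find its source pad matching
 * our sink endpoint and create an immutable, enabled media link to our sink
 * pad.
 */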
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *s_subdev,
			      struct v4l2_async_connection *asd)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);

	csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
							 asd->match.fwnode,
							 MEDIA_PAD_FL_SOURCE);
	if (csi2rx->source_pad < 0) {
		dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
			s_subdev->name);
		return csi2rx->source_pad;
	}

	csi2rx->source_subdev = s_subdev;

	dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
		csi2rx->source_pad);

	return media_create_pad_link(&csi2rx->source_subdev->entity,
				     csi2rx->source_pad,
				     &csi2rx->subdev.entity, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}

static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
	.bound		= csi2rx_async_bound,
};

static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
				struct platform_device *pdev)
{
	unsigned char i;
	u32 dev_cfg;
	int ret;

	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi2rx->base))
		return PTR_ERR(csi2rx->base);

	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
	if (IS_ERR(csi2rx->sys_clk)) {
		dev_err(&pdev->dev, "Couldn't get sys clock\n");
		return PTR_ERR(csi2rx->sys_clk);
	}

	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
	if (IS_ERR(csi2rx->p_clk)) {
		dev_err(&pdev->dev, "Couldn't get P clock\n");
		return PTR_ERR(csi2rx->p_clk);
	}

	csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								    "sys");
	if (IS_ERR(csi2rx->sys_rst))
		return PTR_ERR(csi2rx->sys_rst);

	csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								  "reg_bank");
	if (IS_ERR(csi2rx->p_rst))
		return PTR_ERR(csi2rx->p_rst);

	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2rx->dphy)) {
		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
		return PTR_ERR(csi2rx->dphy);
	}

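	/*
	 * Read the synthesis-time configuration from DEVICE_CFG: bits [2:0]
	 * give the number of lanes, bit 3 the presence of an internal D-PHY
	 * and bits [6:4] the number of streams. The register bank clock must
	 * be running for the read to be valid.
	 */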
	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
		return ret;
	}

	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
	clk_disable_unprepare(csi2rx->p_clk);

	csi2rx->max_lanes = dev_cfg & 7;
	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
			csi2rx->max_lanes);
		return -EINVAL;
	}

	csi2rx->max_streams = (dev_cfg >> 4) & 7;
	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
			csi2rx->max_streams);
		return -EINVAL;
	}

	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;

	/*
	 * FIXME: Once we have internal D-PHY support, this check will
	 * need to be removed.
	 */
	if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
		return -EINVAL;
	}

	for (i = 0; i < csi2rx->max_streams; i++) {
		char name[16];

		snprintf(name, sizeof(name), "pixel_if%u_clk", i);
		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
		if (IS_ERR(csi2rx->pixel_clk[i])) {
			dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
			return PTR_ERR(csi2rx->pixel_clk[i]);
		}

		snprintf(name, sizeof(name), "pixel_if%u", i);
		csi2rx->pixel_rst[i] =
			devm_reset_control_get_optional_exclusive(&pdev->dev,
								  name);
		if (IS_ERR(csi2rx->pixel_rst[i]))
			return PTR_ERR(csi2rx->pixel_rst[i]);
	}

	return 0;
}

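/*
 * Parse the first sink endpoint from DT, validate that it describes a CSI-2
 * D-PHY bus with a supported number of data lanes, and register an async
 * notifier so the remote source subdev can be bound later.
 */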
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
	struct v4l2_async_connection *asd;
	struct fwnode_handle *fwh;
	struct device_node *ep;
	int ret;

	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	fwh = of_fwnode_handle(ep);
	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
	if (ret) {
		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
		of_node_put(ep);
		return ret;
	}

	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
			v4l2_ep.bus_type);
		of_node_put(ep);
		return -EINVAL;
	}

	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
	       sizeof(csi2rx->lanes));
	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
	if (csi2rx->num_lanes > csi2rx->max_lanes) {
		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
			csi2rx->num_lanes);
		of_node_put(ep);
		return -EINVAL;
	}

	v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
					      struct v4l2_async_connection);
	of_node_put(ep);
	if (IS_ERR(asd)) {
		v4l2_async_nf_cleanup(&csi2rx->notifier);
		return PTR_ERR(asd);
	}

	csi2rx->notifier.ops = &csi2rx_notifier_ops;

	ret = v4l2_async_nf_register(&csi2rx->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&csi2rx->notifier);

	return ret;
}

static int csi2rx_probe(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx;
	unsigned int i;
	int ret;

	csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
	if (!csi2rx)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi2rx);
	csi2rx->dev = &pdev->dev;
	mutex_init(&csi2rx->lock);

	ret = csi2rx_get_resources(csi2rx, pdev);
	if (ret)
		goto err_free_priv;

	ret = csi2rx_parse_dt(csi2rx);
	if (ret)
		goto err_free_priv;

	csi2rx->subdev.owner = THIS_MODULE;
	csi2rx->subdev.dev = &pdev->dev;
	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
	snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
		 KBUILD_MODNAME, dev_name(&pdev->dev));

	/* Create our media pads */
	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
				     csi2rx->pads);
	if (ret)
		goto err_cleanup;

	ret = v4l2_async_register_subdev(&csi2rx->subdev);
	if (ret < 0)
		goto err_cleanup;

	dev_info(&pdev->dev,
		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
		 csi2rx->dphy ? "external" :
		 csi2rx->has_internal_dphy ? "internal" : "no");

	return 0;

err_cleanup:
	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
err_free_priv:
	kfree(csi2rx);
	return ret;
}

static void csi2rx_remove(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	v4l2_async_unregister_subdev(&csi2rx->subdev);
	kfree(csi2rx);
}

static const struct of_device_id csi2rx_of_table[] = {
	{ .compatible = "starfive,jh7110-csi2rx" },
	{ .compatible = "cdns,csi2rx" },
	{ },
};
MODULE_DEVICE_TABLE(of, csi2rx_of_table);

static struct platform_driver csi2rx_driver = {
	.probe	= csi2rx_probe,
	.remove_new = csi2rx_remove,

	.driver	= {
		.name		= "cdns-csi2rx",
		.of_match_table	= csi2rx_of_table,
	},
};
module_platform_driver(csi2rx_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
MODULE_DESCRIPTION("Cadence CSI2-RX controller");
MODULE_LICENSE("GPL");