xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision e8e0929d)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <linux/sysrq.h>
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drm.h"
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 
37 #define MAX_NOPID ((u32)~0)
38 
39 /**
40  * Interrupts that are always left unmasked.
41  *
42  * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
43  * we leave them always unmasked in IMR and then control enabling them through
44  * PIPESTAT alone.
45  */
46 #define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |		 \
47 				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
48 				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
49 				   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
50 
51 /** Interrupts that we mask and unmask at runtime. */
52 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
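
/*
 * Illustrative sketch (not compiled) of how the two masks above are used by
 * i915_driver_irq_postinstall() further down: the FIX interrupts are enabled
 * in IER and left permanently unmasked in IMR, while the VAR interrupts are
 * enabled in IER but start out masked and are toggled at runtime through
 * i915_enable_irq()/i915_disable_irq().
 *
 *	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 *
 *	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
 *	I915_WRITE(IER, enable_mask);
 *	I915_WRITE(IMR, dev_priv->irq_mask_reg);
 */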
53 
54 #define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
55 				 PIPE_VBLANK_INTERRUPT_STATUS)
56 
57 #define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
58 				 PIPE_VBLANK_INTERRUPT_ENABLE)
59 
60 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
61 					 DRM_I915_VBLANK_PIPE_B)
62 
63 void
64 igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
65 {
66 	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
67 		dev_priv->gt_irq_mask_reg &= ~mask;
68 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
69 		(void) I915_READ(GTIMR);
70 	}
71 }
72 
73 static inline void
74 igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
75 {
76 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
77 		dev_priv->gt_irq_mask_reg |= mask;
78 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
79 		(void) I915_READ(GTIMR);
80 	}
81 }
82 
83 /* For display hotplug interrupt */
84 void
85 igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
86 {
87 	if ((dev_priv->irq_mask_reg & mask) != 0) {
88 		dev_priv->irq_mask_reg &= ~mask;
89 		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
90 		(void) I915_READ(DEIMR);
91 	}
92 }
93 
94 static inline void
95 igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96 {
97 	if ((dev_priv->irq_mask_reg & mask) != mask) {
98 		dev_priv->irq_mask_reg |= mask;
99 		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
100 		(void) I915_READ(DEIMR);
101 	}
102 }
103 
104 void
105 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
106 {
107 	if ((dev_priv->irq_mask_reg & mask) != 0) {
108 		dev_priv->irq_mask_reg &= ~mask;
109 		I915_WRITE(IMR, dev_priv->irq_mask_reg);
110 		(void) I915_READ(IMR);
111 	}
112 }
113 
114 static inline void
115 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
116 {
117 	if ((dev_priv->irq_mask_reg & mask) != mask) {
118 		dev_priv->irq_mask_reg |= mask;
119 		I915_WRITE(IMR, dev_priv->irq_mask_reg);
120 		(void) I915_READ(IMR);
121 	}
122 }
123 
124 static inline u32
125 i915_pipestat(int pipe)
126 {
127 	if (pipe == 0)
128 		return PIPEASTAT;
129 	if (pipe == 1)
130 		return PIPEBSTAT;
131 	BUG();
132 }
133 
134 void
135 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
136 {
137 	if ((dev_priv->pipestat[pipe] & mask) != mask) {
138 		u32 reg = i915_pipestat(pipe);
139 
140 		dev_priv->pipestat[pipe] |= mask;
141 		/* Enable the interrupt, clear any pending status */
142 		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
143 		(void) I915_READ(reg);
144 	}
145 }
146 
147 void
148 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
149 {
150 	if ((dev_priv->pipestat[pipe] & mask) != 0) {
151 		u32 reg = i915_pipestat(pipe);
152 
153 		dev_priv->pipestat[pipe] &= ~mask;
154 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
155 		(void) I915_READ(reg);
156 	}
157 }
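
/*
 * Note on the PIPE(A|B)STAT layout the helpers above rely on: the interrupt
 * enable bits live in the high half of the register and the matching status
 * bits sit sixteen bits below them, which is why i915_enable_pipestat()
 * writes "pipestat | (mask >> 16)" -- it sets the enable bit and acks any
 * status that was already pending.  Illustrative caller, mirroring
 * i915_enable_vblank() later in this file:
 *
 *	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 *	i915_enable_pipestat(dev_priv, pipe,
 *			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 *	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 */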
158 
159 /**
160  * i915_pipe_enabled - check if a pipe is enabled
161  * @dev: DRM device
162  * @pipe: pipe to check
163  *
164  * Reading certain registers when the pipe is disabled can hang the chip.
165  * Use this routine to make sure the PLL is running and the pipe is active
166  * before reading such registers if unsure.
167  */
168 static int
169 i915_pipe_enabled(struct drm_device *dev, int pipe)
170 {
171 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172 	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
173 
174 	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
175 		return 1;
176 
177 	return 0;
178 }
179 
180 /* Called from drm generic code, passed a 'crtc', which
181  * we use as a pipe index
182  */
183 u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
184 {
185 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
186 	unsigned long high_frame;
187 	unsigned long low_frame;
188 	u32 high1, high2, low, count;
189 
190 	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
191 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
192 
193 	if (!i915_pipe_enabled(dev, pipe)) {
194 		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
195 		return 0;
196 	}
197 
198 	/*
199 	 * High & low register fields aren't synchronized, so make sure
200 	 * we get a low value that's stable across two reads of the high
201 	 * register.
202 	 */
203 	do {
204 		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
205 			 PIPE_FRAME_HIGH_SHIFT);
206 		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
207 			PIPE_FRAME_LOW_SHIFT);
208 		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
209 			 PIPE_FRAME_HIGH_SHIFT);
210 	} while (high1 != high2);
211 
212 	count = (high1 << 8) | low;
213 
214 	return count;
215 }
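
/*
 * Worked example for the read loop above: the frame counter is split across
 * two registers, with the high bits in PIPE?FRAMEHIGH and the low byte in the
 * frame field of PIPE?FRAMEPIXEL.  With high1 == 0x0012 and low == 0x34 the
 * returned count is (0x0012 << 8) | 0x34 == 0x1234.  Re-reading the high
 * register until it is stable guards against the low byte wrapping between
 * the two reads.
 */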
216 
217 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
218 {
219 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
220 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
221 
222 	if (!i915_pipe_enabled(dev, pipe)) {
223 		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
224 		return 0;
225 	}
226 
227 	return I915_READ(reg);
228 }
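
/*
 * On G4X-class hardware the frame counter is available as a single register
 * (PIPE?_FRMCOUNT_GM45), so one read is enough and no high/low stitching
 * loop is needed.
 */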
229 
230 /*
231  * Handle hotplug events outside the interrupt handler proper.
232  */
233 static void i915_hotplug_work_func(struct work_struct *work)
234 {
235 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
236 						    hotplug_work);
237 	struct drm_device *dev = dev_priv->dev;
238 	struct drm_mode_config *mode_config = &dev->mode_config;
239 	struct drm_connector *connector;
240 
241 	if (mode_config->num_connector) {
242 		list_for_each_entry(connector, &mode_config->connector_list, head) {
243 			struct intel_output *intel_output = to_intel_output(connector);
244 
245 			if (intel_output->hot_plug)
246 				(*intel_output->hot_plug) (intel_output);
247 		}
248 	}
249 	/* Just fire off a uevent and let userspace tell us what to do */
250 	drm_sysfs_hotplug_event(dev);
251 }
252 
253 irqreturn_t igdng_irq_handler(struct drm_device *dev)
254 {
255 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
256 	int ret = IRQ_NONE;
257 	u32 de_iir, gt_iir;
258 	u32 new_de_iir, new_gt_iir;
259 	struct drm_i915_master_private *master_priv;
260 
261 	de_iir = I915_READ(DEIIR);
262 	gt_iir = I915_READ(GTIIR);
263 
264 	for (;;) {
265 		if (de_iir == 0 && gt_iir == 0)
266 			break;
267 
268 		ret = IRQ_HANDLED;
269 
270 		I915_WRITE(DEIIR, de_iir);
271 		new_de_iir = I915_READ(DEIIR);
272 		I915_WRITE(GTIIR, gt_iir);
273 		new_gt_iir = I915_READ(GTIIR);
274 
275 		if (dev->primary->master) {
276 			master_priv = dev->primary->master->driver_priv;
277 			if (master_priv->sarea_priv)
278 				master_priv->sarea_priv->last_dispatch =
279 					READ_BREADCRUMB(dev_priv);
280 		}
281 
282 		if (gt_iir & GT_USER_INTERRUPT) {
283 			u32 seqno = i915_get_gem_seqno(dev);
284 			dev_priv->mm.irq_gem_seqno = seqno;
285 			trace_i915_gem_request_complete(dev, seqno);
286 			DRM_WAKEUP(&dev_priv->irq_queue);
287 		}
288 
289 		de_iir = new_de_iir;
290 		gt_iir = new_gt_iir;
291 	}
292 
293 	return ret;
294 }
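
/*
 * Note on the loop above: writing the latched values back to DEIIR/GTIIR
 * acks exactly the bits being handled (the IIR registers are write-1-to-
 * clear), and the immediate re-read picks up any bits that latched while we
 * were processing.  The handler only returns once both registers read back
 * as zero.
 */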
295 
296 /**
297  * i915_error_work_func - do process context error handling work
298  * @work: work struct
299  *
300  * Fire an error uevent so userspace can see that a hang or error
301  * was detected.
302  */
303 static void i915_error_work_func(struct work_struct *work)
304 {
305 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
306 						    error_work);
307 	struct drm_device *dev = dev_priv->dev;
308 	char *error_event[] = { "ERROR=1", NULL };
309 	char *reset_event[] = { "RESET=1", NULL };
310 	char *reset_done_event[] = { "ERROR=0", NULL };
311 
312 	DRM_DEBUG("generating error event\n");
313 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
314 
315 	if (atomic_read(&dev_priv->mm.wedged)) {
316 		if (IS_I965G(dev)) {
317 			DRM_DEBUG("resetting chip\n");
318 			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
319 			if (!i965_reset(dev, GDRST_RENDER)) {
320 				atomic_set(&dev_priv->mm.wedged, 0);
321 				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
322 			}
323 		} else {
324 			printk("reboot required\n");
325 		}
326 	}
327 }
328 
329 /**
330  * i915_capture_error_state - capture an error record for later analysis
331  * @dev: drm device
332  *
333  * Should be called when an error is detected (either a hang or an error
334  * interrupt) to capture error state from the time of the error.  Fills
335  * out a structure which becomes available in debugfs for user level tools
336  * to pick up.
337  */
338 static void i915_capture_error_state(struct drm_device *dev)
339 {
340 	struct drm_i915_private *dev_priv = dev->dev_private;
341 	struct drm_i915_error_state *error;
342 	unsigned long flags;
343 
344 	spin_lock_irqsave(&dev_priv->error_lock, flags);
345 	if (dev_priv->first_error)
346 		goto out;
347 
348 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
349 	if (!error) {
350 		DRM_DEBUG("out of memory, not capturing error state\n");
351 		goto out;
352 	}
353 
354 	error->eir = I915_READ(EIR);
355 	error->pgtbl_er = I915_READ(PGTBL_ER);
356 	error->pipeastat = I915_READ(PIPEASTAT);
357 	error->pipebstat = I915_READ(PIPEBSTAT);
358 	error->instpm = I915_READ(INSTPM);
359 	if (!IS_I965G(dev)) {
360 		error->ipeir = I915_READ(IPEIR);
361 		error->ipehr = I915_READ(IPEHR);
362 		error->instdone = I915_READ(INSTDONE);
363 		error->acthd = I915_READ(ACTHD);
364 	} else {
365 		error->ipeir = I915_READ(IPEIR_I965);
366 		error->ipehr = I915_READ(IPEHR_I965);
367 		error->instdone = I915_READ(INSTDONE_I965);
368 		error->instps = I915_READ(INSTPS);
369 		error->instdone1 = I915_READ(INSTDONE1);
370 		error->acthd = I915_READ(ACTHD_I965);
371 	}
372 
373 	do_gettimeofday(&error->time);
374 
375 	dev_priv->first_error = error;
376 
377 out:
378 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
379 }
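
/*
 * The captured record is exposed through debugfs for user level tools, as the
 * comment above notes; the exact file name (something like
 * /sys/kernel/debug/dri/<minor>/i915_error_state) is an assumption here, not
 * defined in this file.  Only the first error is kept -- see the
 * dev_priv->first_error check above -- so later errors do not overwrite it.
 */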
380 
381 /**
382  * i915_handle_error - handle an error interrupt
383  * @dev: drm device
384  *
385  * Do some basic checking of register state at error interrupt time and
386  * dump it to the syslog.  Also call i915_capture_error_state() to make
387  * sure we get a record and make it available in debugfs.  Fire a uevent
388  * so userspace knows something bad happened (should trigger collection
389  * of a ring dump etc.).
390  */
391 static void i915_handle_error(struct drm_device *dev, bool wedged)
392 {
393 	struct drm_i915_private *dev_priv = dev->dev_private;
394 	u32 eir = I915_READ(EIR);
395 	u32 pipea_stats = I915_READ(PIPEASTAT);
396 	u32 pipeb_stats = I915_READ(PIPEBSTAT);
397 
398 	i915_capture_error_state(dev);
399 
400 	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
401 	       eir);
402 
403 	if (IS_G4X(dev)) {
404 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
405 			u32 ipeir = I915_READ(IPEIR_I965);
406 
407 			printk(KERN_ERR "  IPEIR: 0x%08x\n",
408 			       I915_READ(IPEIR_I965));
409 			printk(KERN_ERR "  IPEHR: 0x%08x\n",
410 			       I915_READ(IPEHR_I965));
411 			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
412 			       I915_READ(INSTDONE_I965));
413 			printk(KERN_ERR "  INSTPS: 0x%08x\n",
414 			       I915_READ(INSTPS));
415 			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
416 			       I915_READ(INSTDONE1));
417 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
418 			       I915_READ(ACTHD_I965));
419 			I915_WRITE(IPEIR_I965, ipeir);
420 			(void)I915_READ(IPEIR_I965);
421 		}
422 		if (eir & GM45_ERROR_PAGE_TABLE) {
423 			u32 pgtbl_err = I915_READ(PGTBL_ER);
424 			printk(KERN_ERR "page table error\n");
425 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
426 			       pgtbl_err);
427 			I915_WRITE(PGTBL_ER, pgtbl_err);
428 			(void)I915_READ(PGTBL_ER);
429 		}
430 	}
431 
432 	if (IS_I9XX(dev)) {
433 		if (eir & I915_ERROR_PAGE_TABLE) {
434 			u32 pgtbl_err = I915_READ(PGTBL_ER);
435 			printk(KERN_ERR "page table error\n");
436 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
437 			       pgtbl_err);
438 			I915_WRITE(PGTBL_ER, pgtbl_err);
439 			(void)I915_READ(PGTBL_ER);
440 		}
441 	}
442 
443 	if (eir & I915_ERROR_MEMORY_REFRESH) {
444 		printk(KERN_ERR "memory refresh error\n");
445 		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
446 		       pipea_stats);
447 		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
448 		       pipeb_stats);
449 		/* pipestat has already been acked */
450 	}
451 	if (eir & I915_ERROR_INSTRUCTION) {
452 		printk(KERN_ERR "instruction error\n");
453 		printk(KERN_ERR "  INSTPM: 0x%08x\n",
454 		       I915_READ(INSTPM));
455 		if (!IS_I965G(dev)) {
456 			u32 ipeir = I915_READ(IPEIR);
457 
458 			printk(KERN_ERR "  IPEIR: 0x%08x\n",
459 			       I915_READ(IPEIR));
460 			printk(KERN_ERR "  IPEHR: 0x%08x\n",
461 			       I915_READ(IPEHR));
462 			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
463 			       I915_READ(INSTDONE));
464 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
465 			       I915_READ(ACTHD));
466 			I915_WRITE(IPEIR, ipeir);
467 			(void)I915_READ(IPEIR);
468 		} else {
469 			u32 ipeir = I915_READ(IPEIR_I965);
470 
471 			printk(KERN_ERR "  IPEIR: 0x%08x\n",
472 			       I915_READ(IPEIR_I965));
473 			printk(KERN_ERR "  IPEHR: 0x%08x\n",
474 			       I915_READ(IPEHR_I965));
475 			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
476 			       I915_READ(INSTDONE_I965));
477 			printk(KERN_ERR "  INSTPS: 0x%08x\n",
478 			       I915_READ(INSTPS));
479 			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
480 			       I915_READ(INSTDONE1));
481 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
482 			       I915_READ(ACTHD_I965));
483 			I915_WRITE(IPEIR_I965, ipeir);
484 			(void)I915_READ(IPEIR_I965);
485 		}
486 	}
487 
488 	I915_WRITE(EIR, eir);
489 	(void)I915_READ(EIR);
490 	eir = I915_READ(EIR);
491 	if (eir) {
492 		/*
493 		 * some errors might have become stuck,
494 		 * mask them.
495 		 */
496 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
497 		I915_WRITE(EMR, I915_READ(EMR) | eir);
498 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
499 	}
500 
501 	if (wedged) {
502 		atomic_set(&dev_priv->mm.wedged, 1);
503 
504 		/*
505 		 * Wake up waiting processes so they don't hang
506 		 */
507 		printk("i915: Waking up sleeping processes\n");
508 		DRM_WAKEUP(&dev_priv->irq_queue);
509 	}
510 
511 	queue_work(dev_priv->wq, &dev_priv->error_work);
512 }
513 
514 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
515 {
516 	struct drm_device *dev = (struct drm_device *) arg;
517 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
518 	struct drm_i915_master_private *master_priv;
519 	u32 iir, new_iir;
520 	u32 pipea_stats, pipeb_stats;
521 	u32 vblank_status;
522 	u32 vblank_enable;
523 	int vblank = 0;
524 	unsigned long irqflags;
525 	int irq_received;
526 	int ret = IRQ_NONE;
527 
528 	atomic_inc(&dev_priv->irq_received);
529 
530 	if (IS_IGDNG(dev))
531 		return igdng_irq_handler(dev);
532 
533 	iir = I915_READ(IIR);
534 
535 	if (IS_I965G(dev)) {
536 		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
537 		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
538 	} else {
539 		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
540 		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
541 	}
542 
543 	for (;;) {
544 		irq_received = iir != 0;
545 
546 		/* Can't rely on pipestat interrupt bit in iir as it might
547 		 * have been cleared after the pipestat interrupt was received.
548 		 * It doesn't set the bit in iir again, but it still produces
549 		 * interrupts (for non-MSI).
550 		 */
551 		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
552 		pipea_stats = I915_READ(PIPEASTAT);
553 		pipeb_stats = I915_READ(PIPEBSTAT);
554 
555 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
556 			i915_handle_error(dev, false);
557 
558 		/*
559 		 * Clear the PIPE(A|B)STAT regs before the IIR
560 		 */
561 		if (pipea_stats & 0x8000ffff) {
562 			if (pipea_stats &  PIPE_FIFO_UNDERRUN_STATUS)
563 				DRM_DEBUG("pipe a underrun\n");
564 			I915_WRITE(PIPEASTAT, pipea_stats);
565 			irq_received = 1;
566 		}
567 
568 		if (pipeb_stats & 0x8000ffff) {
569 			if (pipeb_stats &  PIPE_FIFO_UNDERRUN_STATUS)
570 				DRM_DEBUG("pipe b underrun\n");
571 			I915_WRITE(PIPEBSTAT, pipeb_stats);
572 			irq_received = 1;
573 		}
574 		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
575 
576 		if (!irq_received)
577 			break;
578 
579 		ret = IRQ_HANDLED;
580 
581 		/* Consume port.  Then clear IIR or we'll miss events */
582 		if ((I915_HAS_HOTPLUG(dev)) &&
583 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
584 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
585 
586 			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
587 				  hotplug_status);
588 			if (hotplug_status & dev_priv->hotplug_supported_mask)
589 				queue_work(dev_priv->wq,
590 					   &dev_priv->hotplug_work);
591 
592 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
593 			I915_READ(PORT_HOTPLUG_STAT);
594 
595 			/* Handle CRT EOS interrupts (IGD only) */
596 			if (IS_IGD(dev) &&
597 				(hotplug_status & CRT_EOS_INT_STATUS)) {
598 				u32 temp;
599 
600 				DRM_DEBUG("EOS interrupt occurred\n");
601 				/* status is already cleared */
602 				temp = I915_READ(ADPA);
603 				temp &= ~ADPA_DAC_ENABLE;
604 				I915_WRITE(ADPA, temp);
605 
606 				temp = I915_READ(PORT_HOTPLUG_EN);
607 				temp &= ~CRT_EOS_INT_EN;
608 				I915_WRITE(PORT_HOTPLUG_EN, temp);
609 
610 				temp = I915_READ(PORT_HOTPLUG_STAT);
611 				if (temp & CRT_EOS_INT_STATUS)
612 					I915_WRITE(PORT_HOTPLUG_STAT,
613 						CRT_EOS_INT_STATUS);
614 			}
615 		}
616 
617 		I915_WRITE(IIR, iir);
618 		new_iir = I915_READ(IIR); /* Flush posted writes */
619 
620 		if (dev->primary->master) {
621 			master_priv = dev->primary->master->driver_priv;
622 			if (master_priv->sarea_priv)
623 				master_priv->sarea_priv->last_dispatch =
624 					READ_BREADCRUMB(dev_priv);
625 		}
626 
627 		if (iir & I915_USER_INTERRUPT) {
628 			u32 seqno = i915_get_gem_seqno(dev);
629 			dev_priv->mm.irq_gem_seqno = seqno;
630 			trace_i915_gem_request_complete(dev, seqno);
631 			DRM_WAKEUP(&dev_priv->irq_queue);
632 			dev_priv->hangcheck_count = 0;
633 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
634 		}
635 
636 		if (pipea_stats & vblank_status) {
637 			vblank++;
638 			drm_handle_vblank(dev, 0);
639 		}
640 
641 		if (pipeb_stats & vblank_status) {
642 			vblank++;
643 			drm_handle_vblank(dev, 1);
644 		}
645 
646 		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
647 		    (iir & I915_ASLE_INTERRUPT))
648 			opregion_asle_intr(dev);
649 
650 		/* With MSI, interrupts are only generated when iir
651 		 * transitions from zero to nonzero.  If another bit got
652 		 * set while we were handling the existing iir bits, then
653 		 * we would never get another interrupt.
654 		 *
655 		 * This is fine on non-MSI as well, as if we hit this path
656 		 * we avoid exiting the interrupt handler only to generate
657 		 * another one.
658 		 *
659 		 * Note that for MSI this could cause a stray interrupt report
660 		 * if an interrupt landed in the time between writing IIR and
661 		 * the posting read.  This should be rare enough to never
662 		 * trigger the 99% of 100,000 interrupts test for disabling
663 		 * stray interrupts.
664 		 */
665 		iir = new_iir;
666 	}
667 
668 	return ret;
669 }
670 
671 static int i915_emit_irq(struct drm_device * dev)
672 {
673 	drm_i915_private_t *dev_priv = dev->dev_private;
674 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
675 	RING_LOCALS;
676 
677 	i915_kernel_lost_context(dev);
678 
679 	DRM_DEBUG("\n");
680 
681 	dev_priv->counter++;
682 	if (dev_priv->counter > 0x7FFFFFFFUL)
683 		dev_priv->counter = 1;
684 	if (master_priv->sarea_priv)
685 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
686 
687 	BEGIN_LP_RING(4);
688 	OUT_RING(MI_STORE_DWORD_INDEX);
689 	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
690 	OUT_RING(dev_priv->counter);
691 	OUT_RING(MI_USER_INTERRUPT);
692 	ADVANCE_LP_RING();
693 
694 	return dev_priv->counter;
695 }
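
/*
 * Illustrative flow (not compiled): i915_emit_irq() queues an
 * MI_STORE_DWORD_INDEX that writes the new counter into the breadcrumb slot
 * of the hardware status page, followed by an MI_USER_INTERRUPT.  Callers
 * such as i915_irq_emit()/i915_wait_irq() below pair the two halves:
 *
 *	int seq, ret;
 *
 *	mutex_lock(&dev->struct_mutex);
 *	seq = i915_emit_irq(dev);
 *	mutex_unlock(&dev->struct_mutex);
 *
 *	ret = i915_wait_irq(dev, seq);	(returns once
 *					 READ_BREADCRUMB(dev_priv) >= seq)
 */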
696 
697 void i915_user_irq_get(struct drm_device *dev)
698 {
699 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
700 	unsigned long irqflags;
701 
702 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
703 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
704 		if (IS_IGDNG(dev))
705 			igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
706 		else
707 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
708 	}
709 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
710 }
711 
712 void i915_user_irq_put(struct drm_device *dev)
713 {
714 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
715 	unsigned long irqflags;
716 
717 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
718 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
719 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
720 		if (IS_IGDNG(dev))
721 			igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
722 		else
723 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
724 	}
725 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
726 }
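
/*
 * Usage sketch for the get/put pair above, mirroring i915_wait_irq() below:
 * the refcount keeps the user interrupt unmasked for as long as at least one
 * waiter needs it, and masks it again when the last waiter drops its
 * reference (irq_nr as in i915_wait_irq()).
 *
 *	i915_user_irq_get(dev);
 *	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 *		    READ_BREADCRUMB(dev_priv) >= irq_nr);
 *	i915_user_irq_put(dev);
 */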
727 
728 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
729 {
730 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
731 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
732 	int ret = 0;
733 
734 	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
735 		  READ_BREADCRUMB(dev_priv));
736 
737 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
738 		if (master_priv->sarea_priv)
739 			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
740 		return 0;
741 	}
742 
743 	if (master_priv->sarea_priv)
744 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
745 
746 	i915_user_irq_get(dev);
747 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
748 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
749 	i915_user_irq_put(dev);
750 
751 	if (ret == -EBUSY) {
752 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
753 			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
754 	}
755 
756 	return ret;
757 }
758 
759 /* Needs the lock as it touches the ring.
760  */
761 int i915_irq_emit(struct drm_device *dev, void *data,
762 			 struct drm_file *file_priv)
763 {
764 	drm_i915_private_t *dev_priv = dev->dev_private;
765 	drm_i915_irq_emit_t *emit = data;
766 	int result;
767 
768 	if (!dev_priv || !dev_priv->ring.virtual_start) {
769 		DRM_ERROR("called with no initialization\n");
770 		return -EINVAL;
771 	}
772 
773 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
774 
775 	mutex_lock(&dev->struct_mutex);
776 	result = i915_emit_irq(dev);
777 	mutex_unlock(&dev->struct_mutex);
778 
779 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
780 		DRM_ERROR("copy_to_user\n");
781 		return -EFAULT;
782 	}
783 
784 	return 0;
785 }
786 
787 /* Doesn't need the hardware lock.
788  */
789 int i915_irq_wait(struct drm_device *dev, void *data,
790 			 struct drm_file *file_priv)
791 {
792 	drm_i915_private_t *dev_priv = dev->dev_private;
793 	drm_i915_irq_wait_t *irqwait = data;
794 
795 	if (!dev_priv) {
796 		DRM_ERROR("called with no initialization\n");
797 		return -EINVAL;
798 	}
799 
800 	return i915_wait_irq(dev, irqwait->irq_seq);
801 }
802 
803 /* Called from drm generic code, passed 'crtc' which
804  * we use as a pipe index
805  */
806 int i915_enable_vblank(struct drm_device *dev, int pipe)
807 {
808 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
809 	unsigned long irqflags;
810 	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
811 	u32 pipeconf;
812 
813 	pipeconf = I915_READ(pipeconf_reg);
814 	if (!(pipeconf & PIPEACONF_ENABLE))
815 		return -EINVAL;
816 
817 	if (IS_IGDNG(dev))
818 		return 0;
819 
820 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
821 	if (IS_I965G(dev))
822 		i915_enable_pipestat(dev_priv, pipe,
823 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
824 	else
825 		i915_enable_pipestat(dev_priv, pipe,
826 				     PIPE_VBLANK_INTERRUPT_ENABLE);
827 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
828 	return 0;
829 }
830 
831 /* Called from drm generic code, passed 'crtc' which
832  * we use as a pipe index
833  */
834 void i915_disable_vblank(struct drm_device *dev, int pipe)
835 {
836 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
837 	unsigned long irqflags;
838 
839 	if (IS_IGDNG(dev))
840 		return;
841 
842 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
843 	i915_disable_pipestat(dev_priv, pipe,
844 			      PIPE_VBLANK_INTERRUPT_ENABLE |
845 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
846 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
847 }
848 
849 void i915_enable_interrupt(struct drm_device *dev)
850 {
851 	struct drm_i915_private *dev_priv = dev->dev_private;
852 
853 	if (!IS_IGDNG(dev))
854 		opregion_enable_asle(dev);
855 	dev_priv->irq_enabled = 1;
856 }
857 
858 
859 /* Set the vblank monitor pipe
860  */
861 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
862 			 struct drm_file *file_priv)
863 {
864 	drm_i915_private_t *dev_priv = dev->dev_private;
865 
866 	if (!dev_priv) {
867 		DRM_ERROR("called with no initialization\n");
868 		return -EINVAL;
869 	}
870 
871 	return 0;
872 }
873 
874 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
875 			 struct drm_file *file_priv)
876 {
877 	drm_i915_private_t *dev_priv = dev->dev_private;
878 	drm_i915_vblank_pipe_t *pipe = data;
879 
880 	if (!dev_priv) {
881 		DRM_ERROR("called with no initialization\n");
882 		return -EINVAL;
883 	}
884 
885 	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
886 
887 	return 0;
888 }
889 
890 /**
891  * Schedule buffer swap at given vertical blank.
892  */
893 int i915_vblank_swap(struct drm_device *dev, void *data,
894 		     struct drm_file *file_priv)
895 {
896 	/* The delayed swap mechanism was fundamentally racy, and has been
897 	 * removed.  The model was that the client requested a delayed flip/swap
898 	 * from the kernel, then waited for vblank before continuing to perform
899 	 * rendering.  The problem was that the kernel might wake the client
900 	 * up before it dispatched the vblank swap (since the lock has to be
901 	 * held while touching the ringbuffer), in which case the client would
902 	 * clear and start the next frame before the swap occurred, and
903 	 * flicker would occur in addition to likely missing the vblank.
904 	 *
905 	 * In the absence of this ioctl, userland falls back to a correct path
906 	 * of waiting for a vblank, then dispatching the swap on its own.
907 	 * Context switching to userland and back is plenty fast enough for
908 	 * meeting the requirements of vblank swapping.
909 	 */
910 	return -EINVAL;
911 }
912 
913 struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev)
{
914 	drm_i915_private_t *dev_priv = dev->dev_private;
915 	return list_entry(dev_priv->mm.request_list.prev,
			  struct drm_i915_gem_request, list);
916 }
917 
918 /**
919  * This is called when the chip hasn't reported back with completed
920  * batchbuffers in a long time. The first time this is called we simply record
921  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
922  * again, we assume the chip is wedged and try to fix it.
923  */
924 void i915_hangcheck_elapsed(unsigned long data)
925 {
926 	struct drm_device *dev = (struct drm_device *)data;
927 	drm_i915_private_t *dev_priv = dev->dev_private;
928 	uint32_t acthd;
929 
930 	if (!IS_I965G(dev))
931 		acthd = I915_READ(ACTHD);
932 	else
933 		acthd = I915_READ(ACTHD_I965);
934 
935 	/* If all outstanding work is done, a stalled ACTHD is expected, not a hang. */
936 	if (list_empty(&dev_priv->mm.request_list) ||
937 		       i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
938 		dev_priv->hangcheck_count = 0;
939 		return;
940 	}
941 
942 	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
943 		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
944 		i915_handle_error(dev, true);
945 		return;
946 	}
947 
948 	/* Re-arm the timer in case the chip hangs without another request being added */
949 	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
950 
951 	if (acthd != dev_priv->last_acthd)
952 		dev_priv->hangcheck_count = 0;
953 	else
954 		dev_priv->hangcheck_count++;
955 
956 	dev_priv->last_acthd = acthd;
957 }
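
/*
 * The hangcheck timer is re-armed from two places: the mod_timer() call above
 * and the user interrupt path in i915_driver_irq_handler(), which also resets
 * hangcheck_count whenever a batch completes.  A minimal setup sketch,
 * assumed to live in the driver load path rather than in this file:
 *
 *	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 *		    (unsigned long) dev);
 *	mod_timer(&dev_priv->hangcheck_timer,
 *		  jiffies + DRM_I915_HANGCHECK_PERIOD);
 */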
958 
959 /* drm_dma.h hooks */
961 static void igdng_irq_preinstall(struct drm_device *dev)
962 {
963 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
964 
965 	I915_WRITE(HWSTAM, 0xeffe);
966 
967 	/* XXX hotplug from PCH */
968 
969 	I915_WRITE(DEIMR, 0xffffffff);
970 	I915_WRITE(DEIER, 0x0);
971 	(void) I915_READ(DEIER);
972 
973 	/* and GT */
974 	I915_WRITE(GTIMR, 0xffffffff);
975 	I915_WRITE(GTIER, 0x0);
976 	(void) I915_READ(GTIER);
977 }
978 
979 static int igdng_irq_postinstall(struct drm_device *dev)
980 {
981 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
982 	/* Enable the interrupts that are always left enabled */
983 	u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
984 	u32 render_mask = GT_USER_INTERRUPT;
985 
986 	dev_priv->irq_mask_reg = ~display_mask;
987 	dev_priv->de_irq_enable_reg = display_mask;
988 
989 	/* These should always be able to generate an interrupt */
990 	I915_WRITE(DEIIR, I915_READ(DEIIR));
991 	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
992 	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
993 	(void) I915_READ(DEIER);
994 
995 	/* The user interrupt should be enabled, but start out masked */
996 	dev_priv->gt_irq_mask_reg = 0xffffffff;
997 	dev_priv->gt_irq_enable_reg = render_mask;
998 
999 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1000 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
1001 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1002 	(void) I915_READ(GTIER);
1003 
1004 	return 0;
1005 }
1006 
1007 void i915_driver_irq_preinstall(struct drm_device * dev)
1008 {
1009 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1010 
1011 	atomic_set(&dev_priv->irq_received, 0);
1012 
1013 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1014 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1015 
1016 	if (IS_IGDNG(dev)) {
1017 		igdng_irq_preinstall(dev);
1018 		return;
1019 	}
1020 
1021 	if (I915_HAS_HOTPLUG(dev)) {
1022 		I915_WRITE(PORT_HOTPLUG_EN, 0);
1023 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1024 	}
1025 
1026 	I915_WRITE(HWSTAM, 0xeffe);
1027 	I915_WRITE(PIPEASTAT, 0);
1028 	I915_WRITE(PIPEBSTAT, 0);
1029 	I915_WRITE(IMR, 0xffffffff);
1030 	I915_WRITE(IER, 0x0);
1031 	(void) I915_READ(IER);
1032 }
1033 
1034 int i915_driver_irq_postinstall(struct drm_device *dev)
1035 {
1036 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1037 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1038 	u32 error_mask;
1039 
1040 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1041 
1042 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1043 
1044 	if (IS_IGDNG(dev))
1045 		return igdng_irq_postinstall(dev);
1046 
1047 	/* Unmask the interrupts that we always want on. */
1048 	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
1049 
1050 	dev_priv->pipestat[0] = 0;
1051 	dev_priv->pipestat[1] = 0;
1052 
1053 	if (I915_HAS_HOTPLUG(dev)) {
1054 		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1055 
1056 		/* Leave other bits alone */
1057 		hotplug_en |= HOTPLUG_EN_MASK;
1058 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1059 
1060 		dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1061 			TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1062 			SDVOB_HOTPLUG_INT_STATUS;
1063 		if (IS_G4X(dev)) {
1064 			dev_priv->hotplug_supported_mask |=
1065 				HDMIB_HOTPLUG_INT_STATUS |
1066 				HDMIC_HOTPLUG_INT_STATUS |
1067 				HDMID_HOTPLUG_INT_STATUS;
1068 		}
1069 		/* Enable in IER... */
1070 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1071 		/* and unmask in IMR */
1072 		i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
1073 	}
1074 
1075 	/*
1076 	 * Enable some error detection, note the instruction error mask
1077 	 * bit is reserved, so we leave it masked.
1078 	 */
1079 	if (IS_G4X(dev)) {
1080 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
1081 			       GM45_ERROR_MEM_PRIV |
1082 			       GM45_ERROR_CP_PRIV |
1083 			       I915_ERROR_MEMORY_REFRESH);
1084 	} else {
1085 		error_mask = ~(I915_ERROR_PAGE_TABLE |
1086 			       I915_ERROR_MEMORY_REFRESH);
1087 	}
1088 	I915_WRITE(EMR, error_mask);
1089 
1090 	/* Disable pipe interrupt enables, clear pending pipe status */
1091 	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1092 	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1093 	/* Clear pending interrupt status */
1094 	I915_WRITE(IIR, I915_READ(IIR));
1095 
1096 	I915_WRITE(IER, enable_mask);
1097 	I915_WRITE(IMR, dev_priv->irq_mask_reg);
1098 	(void) I915_READ(IER);
1099 
1100 	opregion_enable_asle(dev);
1101 
1102 	return 0;
1103 }
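
/*
 * Ordering note, based on the drm core's drm_irq_install() behaviour:
 * i915_driver_irq_preinstall() runs first to mask and quiesce everything, the
 * interrupt handler is then installed, and i915_driver_irq_postinstall()
 * finally unmasks and enables the interrupts the driver wants, so nothing can
 * fire before the handler and the enable masks are in place.
 */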
1104 
1105 static void igdng_irq_uninstall(struct drm_device *dev)
1106 {
1107 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1108 	I915_WRITE(HWSTAM, 0xffffffff);
1109 
1110 	I915_WRITE(DEIMR, 0xffffffff);
1111 	I915_WRITE(DEIER, 0x0);
1112 	I915_WRITE(DEIIR, I915_READ(DEIIR));
1113 
1114 	I915_WRITE(GTIMR, 0xffffffff);
1115 	I915_WRITE(GTIER, 0x0);
1116 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1117 }
1118 
1119 void i915_driver_irq_uninstall(struct drm_device * dev)
1120 {
1121 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1122 
1123 	if (!dev_priv)
1124 		return;
1125 
1126 	dev_priv->vblank_pipe = 0;
1127 
1128 	if (IS_IGDNG(dev)) {
1129 		igdng_irq_uninstall(dev);
1130 		return;
1131 	}
1132 
1133 	if (I915_HAS_HOTPLUG(dev)) {
1134 		I915_WRITE(PORT_HOTPLUG_EN, 0);
1135 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1136 	}
1137 
1138 	I915_WRITE(HWSTAM, 0xffffffff);
1139 	I915_WRITE(PIPEASTAT, 0);
1140 	I915_WRITE(PIPEBSTAT, 0);
1141 	I915_WRITE(IMR, 0xffffffff);
1142 	I915_WRITE(IER, 0x0);
1143 
1144 	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1145 	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1146 	I915_WRITE(IIR, I915_READ(IIR));
1147 }
1148