/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

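/*
 * Raw MMIO accessors: these touch the register BAR directly and take neither
 * the uncore spinlock nor a forcewake reference, so callers are responsible
 * for both.
 */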
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

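/*
 * Gen6 (SNB) forcewake: a single request bit in FORCEWAKE keeps the GT powered
 * up, and FORCEWAKE_ACK reflects whether the request has taken effect.
 */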
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

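/*
 * IVB/HSW/BDW use the multi-threaded (MT) forcewake interface: FORCEWAKE_MT is
 * a masked register, so each agent (kernel, BIOS, ...) can set and clear its
 * own request bit without disturbing the others.
 */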
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

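/*
 * Gen6/gen7 have a small FIFO of outstanding GT register writes. Wait here
 * until the number of free entries is above the reserved watermark, so that a
 * subsequent write cannot overflow the FIFO.
 */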
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV the FIFO is shared by both SW and HW, so we need to
	 * re-read the number of free entries every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

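/*
 * Valleyview splits forcewake into separate render and media wells, each with
 * its own request/ack registers and reference count.
 */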
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out waiting for Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out waiting for Render forcewake to ack request.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out waiting for Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out waiting for Media forcewake to ack request.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

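/*
 * Timer callback that drops the deferred forcewake reference taken in
 * gen6_gt_force_wake_put(), so that back-to-back register accesses do not
 * bounce forcewake off and immediately back on.
 */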
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

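/*
 * Drop all forcewake requests at the hardware level and, if restore is set,
 * re-assert whatever references software still holds; used around GPU reset
 * and during init/teardown.
 */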
static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
	((reg) >= 0x5000 && (reg) < 0x8000) ||\
	((reg) >= 0xB000 && (reg) < 0x12000) ||\
	((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
	((reg) >= 0x22000 && (reg) < 0x24000) ||\
	((reg) >= 0x30000 && (reg) < 0x40000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

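/*
 * The per-generation MMIO accessors are generated from the macros below; each
 * one takes the uncore spinlock, grabs forcewake where the platform and
 * register range require it, and emits a tracepoint for the access.
 */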
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

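/*
 * A few gen8 registers are shadowed: the hardware keeps their values current
 * without the GT being awake, so writes to them do not need forcewake.
 */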
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

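/*
 * Pick the forcewake implementation and the MMIO accessor vfuncs that match
 * this platform, and set up the deferred forcewake release timer.
 */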
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here: if the BIOS hasn't configured
		 * MT forcewake, and the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which is (correctly)
		 * interpreted by the test below as MT forcewake being
		 * disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

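/*
 * Registers userspace is allowed to read through the reg_read ioctl, together
 * with the read size and the generations on which each register is valid.
 */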
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

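/*
 * Per-generation GPU reset implementations: i965/g4x reset through the GDRST
 * register in PCI config space, Ironlake through the MCHBAR mirror of its
 * reset register, and gen6+ through the GEN6_GDRST MMIO register.
 */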
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4:
		if (IS_G4X(dev))
			return g4x_do_reset(dev);
		else
			return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}