1 /* 2 * Copyright 2018 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 */
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include "outp.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/ramht.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/* Route the SOR's HDA (HD-Audio) codec device entry to the given head.
 * Per-head register stride is 0x800; the head index lands in bits 6:4
 * of 0x616528.
 */
static void
gv100_sor_hda_device_entry(struct nvkm_ior *ior, int head)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	const u32 hoff = 0x800 * head;

	nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
}

/* GV100 HDA hooks: hotplug/ELD handling is unchanged from GF119, only
 * the device-entry routing register moved.
 */
const struct nvkm_ior_func_hda
gv100_sor_hda = {
	.hpd = gf119_sor_hda_hpd,
	.eld = gf119_sor_hda_eld,
	.device_entry = gv100_sor_hda_device_entry,
};

/* Program the DP audio watermark for a head (low 6 bits of 0x616550),
 * also setting bit 27 in the same write.
 */
void
gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 hoff = head * 0x800;

	nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
}

/* Program DP audio symbol timing: h into 0x616568 (16 bits), v into
 * 0x61656c (24 bits), per-head stride 0x800.
 */
void
gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 hoff = head * 0x800;

	nvkm_mask(device, 0x616568 + hoff, 0x0000ffff, h);
	nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
}

/* Enable/disable DP audio on a head, then poll (up to 2ms) for bit 31
 * of the same register to clear — presumably a hardware "update
 * pending" acknowledge; timeout is silently ignored.
 */
void
gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 hoff = 0x800 * head;
	const u32 data = 0x80000000 | (0x00000001 * enable);
	const u32 mask = 0x8000000d;

	nvkm_mask(device, 0x616560 + hoff, mask, data);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x616560 + hoff) & 0x80000000))
			break;
	);
}

/* GV100 DisplayPort hooks; lane ordering is identity (no lane swap),
 * most behaviour inherited from earlier generations.
 */
static const struct nvkm_ior_func_dp
gv100_sor_dp = {
	.lanes = { 0, 1, 2, 3 },
	.links = gf119_sor_dp_links,
	.power = g94_sor_dp_power,
	.pattern = gm107_sor_dp_pattern,
	.drive = gm200_sor_dp_drive,
	.audio = gv100_sor_dp_audio,
	.audio_sym = gv100_sor_dp_audio_sym,
	.watermark = gv100_sor_dp_watermark,
};

/* Configure HDMI output for a head: disables everything when !enable,
 * otherwise writes the packed AVI and Vendor-specific InfoFrames,
 * enables GCP and ACR packets, and finally commits the control word
 * (enable | max_ac_packet | rekey) to NV_PDISP_SF_HDMI_CTRL.
 *
 * avi/vendor point at raw InfoFrame bytes of avi_size/vendor_size
 * length; a size of zero leaves the corresponding packet disabled.
 */
void
gv100_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
		    u8 rekey, u8 *avi, u8 avi_size, u8 *vendor, u8 vendor_size)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	const u32 ctrl = 0x40000000 * enable |
			 max_ac_packet << 16 |
			 rekey;
	const u32 hoff = head * 0x800;
	const u32 hdmi = head * 0x400;
	struct packed_hdmi_infoframe avi_infoframe;
	struct packed_hdmi_infoframe vendor_infoframe;

	pack_hdmi_infoframe(&avi_infoframe, avi, avi_size);
	pack_hdmi_infoframe(&vendor_infoframe, vendor, vendor_size);

	if (!(ctrl & 0x40000000)) {
		/* Disable: HDMI_CTRL off, then VSI, GCP and AVI packets. */
		nvkm_mask(device, 0x6165c0 + hoff, 0x40000000, 0x00000000);
		nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
		return;
	}

	/* AVI InfoFrame (AVI). */
	nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
	if (avi_size) {
		nvkm_wr32(device, 0x6f0008 + hdmi, avi_infoframe.header);
		nvkm_wr32(device, 0x6f000c + hdmi, avi_infoframe.subpack0_low);
		nvkm_wr32(device, 0x6f0010 + hdmi, avi_infoframe.subpack0_high);
		nvkm_wr32(device, 0x6f0014 + hdmi, avi_infoframe.subpack1_low);
		nvkm_wr32(device, 0x6f0018 + hdmi, avi_infoframe.subpack1_high);
		nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000001);
	}

	/* Vendor-specific InfoFrame (VSI). */
	nvkm_mask(device, 0x6f0100 + hdmi, 0x00010001, 0x00000000);
	if (vendor_size) {
		nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
		nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
		nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
		/* Remaining subpack registers are cleared, not written from
		 * the packed frame — only subpack0 is carried.
		 */
		nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
		nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
		nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
		nvkm_wr32(device, 0x6f0120 + hdmi, 0x00000000);
		nvkm_wr32(device, 0x6f0124 + hdmi, 0x00000000);
		nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000001);
	}

	/* General Control (GCP). */
	nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
	nvkm_wr32(device, 0x6f00cc + hdmi, 0x00000010);
	nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);

	/* Audio Clock Regeneration (ACR). */
	nvkm_wr32(device, 0x6f0080 + hdmi, 0x82000000);

	/* NV_PDISP_SF_HDMI_CTRL. */
	nvkm_mask(device, 0x6165c0 + hoff, 0x401f007f, ctrl);
}

/* Read back the SOR's current (armed or assembly) configuration.
 * coff selects the armed shadow copy (+0x8000) when state points at
 * sor->arm.  Protocol/link decode from ctrl bits 11:8, attached head
 * mask from bits 7:0.
 */
void
gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 coff = (state == &sor->arm) * 0x8000 + sor->id * 0x20;
	u32 ctrl = nvkm_rd32(device, 0x680300 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break;
	case 8: state->proto = DP; state->link = 1; break;
	case 9: state->proto = DP; state->link = 2; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	state->head = ctrl & 0x000000ff;
}

/* GV100 SOR function table (continued below). */
static const struct nvkm_ior_func
gv100_sor = {
	.route = {
		.get = gm200_sor_route_get,
		.set = gm200_sor_route_set,
	},
	.state = gv100_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
	.hdmi = {
		.ctrl = gv100_sor_hdmi_ctrl,
		.scdc = gm200_sor_hdmi_scdc,
	},
	.dp = &gv100_sor_dp,
	.hda = &gv100_sor_hda,
};

/* Construct one SOR.  The last argument flags whether this SOR has an
 * HDA codec: if bit 30 of 0x08a15c is clear the per-SOR HDA bitmask is
 * instead taken from 0x118fb0 >> 8 — NOTE(review): register semantics
 * assumed from usage (HDA presence strap/fuse); confirm against
 * hardware documentation.
 */
static int
gv100_sor_new(struct nvkm_disp *disp, int id)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	u32 hda;

	if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
		hda = nvkm_rd32(device, 0x118fb0) >> 8;

	return nvkm_ior_new_(&gv100_sor, disp, SOR, id, hda & BIT(id));
}

/* Report the SOR population: mask of present SORs from 0x610060 bits
 * 15:8, count from 0x610074 bits 11:8.
 */
int
gv100_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = (nvkm_rd32(device, 0x610060) & 0x0000ff00) >> 8;
	return (nvkm_rd32(device, 0x610074) & 0x00000f00) >> 8;
}

/* Disable the per-head vblank interrupt (bit 2 of the HEAD_TIMING
 * interrupt-enable register at 0x611d80 + head*4).
 */
static void
gv100_head_vblank_put(struct nvkm_head *head)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000000);
}

/* Enable the per-head vblank interrupt (counterpart of _put above). */
static void
gv100_head_vblank_get(struct nvkm_head *head)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000004);
}

/* Sample the raster generator's current scanout position. */
static void
gv100_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	const u32 hoff = head->id * 0x800;
	/* vline read locks hline. */
	*vline = nvkm_rd32(device, 0x616330 + hoff) & 0x0000ffff;
	*hline = nvkm_rd32(device, 0x616334 + hoff) & 0x0000ffff;
}

/* Read back a head's (armed or assembly) raster timings.  hoff selects
 * the armed shadow copy (+0x8000) when state points at head->arm.
 * Each timing register packs vertical in the high and horizontal in
 * the low 16 bits; OR depth decodes from bits 7:4 of 0x682004.
 */
static void
gv100_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	const u32 hoff = (state == &head->arm) * 0x8000 + head->id * 0x400;
	u32 data;

	data = nvkm_rd32(device, 0x682064 + hoff);
	state->vtotal = (data & 0xffff0000) >> 16;
	state->htotal = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x682068 + hoff);
	state->vsynce = (data & 0xffff0000) >> 16;
	state->hsynce = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x68206c + hoff);
	state->vblanke = (data & 0xffff0000) >> 16;
	state->hblanke = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x682070 + hoff);
	state->vblanks = (data & 0xffff0000) >> 16;
	state->hblanks = (data & 0x0000ffff);
	state->hz = nvkm_rd32(device, 0x68200c + hoff);

	data = nvkm_rd32(device, 0x682004 + hoff);
	switch ((data & 0x000000f0) >> 4) {
	case 5: state->or.depth = 30; break;
	case 4: state->or.depth = 24; break;
	case 1: state->or.depth = 18; break;
	default:
		/* Unknown depth encoding: fall back to 18bpp and warn. */
		state->or.depth = 18;
		WARN_ON(1);
		break;
	}
}

/* GV100 head hooks; raster clock programming is inherited from GF119. */
static const struct nvkm_head_func
gv100_head = {
	.state = gv100_head_state,
	.rgpos = gv100_head_rgpos,
	.rgclk = gf119_head_rgclk,
	.vblank_get = gv100_head_vblank_get,
	.vblank_put = gv100_head_vblank_put,
};

/* Construct one head, skipping ids absent from the 0x610060 mask. */
int
gv100_head_new(struct nvkm_disp *disp, int id)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	if (!(nvkm_rd32(device, 0x610060) & (0x00000001 << id)))
		return 0;

	return nvkm_head_new_(&gv100_head, disp, id);
}

/* Report the head population: mask from 0x610060 bits 7:0, count from
 * 0x610074 bits 3:0.
 */
int
gv100_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = nvkm_rd32(device, 0x610060) & 0x000000ff;
	return nvkm_rd32(device, 0x610074) & 0x0000000f;
}

/* Channel-notify event: no init/fini hooks needed on GV100; interrupt
 * (de)masking happens via the per-channel .intr functions instead.
 */
const struct nvkm_event_func
gv100_disp_chan_uevent = {
};

/* BAR offset/size of a channel's user-mapped control area.  User chid 0
 * is the core channel at 0x680000 (separate function below); all other
 * channels get a 4KiB page at 0x690000 onwards, hence (user - 1).
 */
u64
gv100_disp_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
{
	*psize = 0x1000;
	return 0x690000 + ((chan->chid.user - 1) * 0x1000);
}

/* Poll (up to 2ms) for the DMA channel's state field (bits 19:16 of
 * 0x610664 + (ctrl-1)*4) to reach 0x4 — taken to be the idle state,
 * per this function's use after enable/disable.
 */
static int
gv100_disp_dmac_idle(struct nvkm_disp_chan *chan)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 soff = (chan->chid.ctrl - 1) * 0x04;
	nvkm_msec(device, 2000,
		u32 stat = nvkm_rd32(device, 0x610664 + soff);
		if ((stat & 0x000f0000) == 0x00040000)
			return 0;
	);
	return -EBUSY;
}

/* Bind a DMA object into the display's RAMHT so the channel can
 * reference it by handle in its pushbuffer methods.
 */
int
gv100_disp_dmac_bind(struct nvkm_disp_chan *chan,
		     struct nvkm_object *object, u32 handle)
{
	return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
				 chan->chid.user << 25 | 0x00000040);
}

/* Stop a DMA channel: drop the enable bit, wait for idle, clear the
 * connect bit, then save the user-area PUT pointer so _init can
 * restore it across suspend/resume.
 */
void
gv100_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
	const u32 coff = chan->chid.ctrl * 0x04;
	nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000000);
	gv100_disp_dmac_idle(chan);
	nvkm_mask(device, 0x6104e0 + coff, 0x00000002, 0x00000000);
	chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
}

/* Start a DMA channel: program its pushbuffer address/config, restore
 * the saved PUT pointer, enable it, and wait for the idle state.
 */
int
gv100_disp_dmac_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
	const u32 poff = chan->chid.ctrl * 0x10;
	const u32 coff = chan->chid.ctrl * 0x04;

	nvkm_wr32(device, 0x610b24 + poff, lower_32_bits(chan->push));
	nvkm_wr32(device, 0x610b20 + poff, upper_32_bits(chan->push));
	nvkm_wr32(device, 0x610b28 + poff, 0x00000001);
	nvkm_wr32(device, 0x610b2c + poff, 0x00000040);

	nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x690000 + uoff, chan->suspend_put);
	nvkm_wr32(device, 0x6104e0 + coff, 0x00000013);
	return gv100_disp_dmac_idle(chan);
}

/* (Un)mask the window immediate channel's interrupt for its head. */
static void
gv100_disp_wimm_intr(struct nvkm_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 mask = 0x00000001 << chan->head;
	const u32 data = en ? mask : 0;
	nvkm_mask(device, 0x611da8, mask, data);
}

static const struct nvkm_disp_chan_func
gv100_disp_wimm_func = {
	.push = nv50_disp_dmac_push,
	.init = gv100_disp_dmac_init,
	.fini = gv100_disp_dmac_fini,
	.intr = gv100_disp_wimm_intr,
	.user = gv100_disp_chan_user,
};

/* Window immediate channels occupy chids 33..64 (33 + head/window). */
const struct nvkm_disp_chan_user
gv100_disp_wimm = {
	.func = &gv100_disp_wimm_func,
	.ctrl = 33,
	.user = 33,
};

/* Window channel method offset -> register mapping, used when dumping
 * channel state (nv50_disp_chan_mthd) on error/debug.
 */
static const struct nvkm_disp_mthd_list
gv100_disp_wndw_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0200, 0x690200 },
		{ 0x020c, 0x69020c },
		{ 0x0210, 0x690210 },
		{ 0x0214, 0x690214 },
		{ 0x0218, 0x690218 },
		{ 0x021c, 0x69021c },
		{ 0x0220, 0x690220 },
		{ 0x0224, 0x690224 },
		{ 0x0228, 0x690228 },
		{ 0x022c, 0x69022c },
		{ 0x0230, 0x690230 },
		{ 0x0234, 0x690234 },
		{ 0x0238, 0x690238 },
		{ 0x0240, 0x690240 },
		{ 0x0244, 0x690244 },
		{ 0x0248, 0x690248 },
		{ 0x024c, 0x69024c },
		{ 0x0250, 0x690250 },
		{ 0x0254, 0x690254 },
		{ 0x0260, 0x690260 },
		{ 0x0264, 0x690264 },
		{ 0x0268, 0x690268 },
		{ 0x026c, 0x69026c },
		{ 0x0270, 0x690270 },
		{ 0x0274, 0x690274 },
		{ 0x0280, 0x690280 },
		{ 0x0284, 0x690284 },
		{ 0x0288, 0x690288 },
		{ 0x028c, 0x69028c },
		{ 0x0290, 0x690290 },
		{ 0x0298, 0x690298 },
		{ 0x029c, 0x69029c },
		{ 0x02a0, 0x6902a0 },
		{ 0x02a4, 0x6902a4 },
		{ 0x02a8, 0x6902a8 },
		{ 0x02ac, 0x6902ac },
		{ 0x02b0, 0x6902b0 },
		{ 0x02b4, 0x6902b4 },
		{ 0x02b8, 0x6902b8 },
		{ 0x02bc, 0x6902bc },
		{ 0x02c0, 0x6902c0 },
		{ 0x02c4, 0x6902c4 },
		{ 0x02c8, 0x6902c8 },
		{ 0x02cc, 0x6902cc },
		{ 0x02d0, 0x6902d0 },
		{ 0x02d4, 0x6902d4 },
		{ 0x02d8, 0x6902d8 },
		{ 0x02dc, 0x6902dc },
		{ 0x02e0, 0x6902e0 },
		{ 0x02e4, 0x6902e4 },
		{ 0x02e8, 0x6902e8 },
		{ 0x02ec, 0x6902ec },
		{ 0x02f0, 0x6902f0 },
		{ 0x02f4, 0x6902f4 },
		{ 0x02f8, 0x6902f8 },
		{ 0x02fc, 0x6902fc },
		{ 0x0300, 0x690300 },
		{ 0x0304, 0x690304 },
		{ 0x0308, 0x690308 },
		{ 0x0310, 0x690310 },
		{ 0x0314, 0x690314 },
		{ 0x0318, 0x690318 },
		{ 0x031c, 0x69031c },
		{ 0x0320, 0x690320 },
		{ 0x0324, 0x690324 },
		{ 0x0328, 0x690328 },
		{ 0x032c, 0x69032c },
		{ 0x033c, 0x69033c },
		{ 0x0340, 0x690340 },
		{ 0x0344, 0x690344 },
		{ 0x0348, 0x690348 },
		{ 0x034c, 0x69034c },
		{ 0x0350, 0x690350 },
		{ 0x0354, 0x690354 },
		{ 0x0358, 0x690358 },
		{ 0x0364, 0x690364 },
		{ 0x0368, 0x690368 },
		{ 0x036c, 0x69036c },
		{ 0x0370, 0x690370 },
		{ 0x0374, 0x690374 },
		{ 0x0380, 0x690380 },
		{}
	}
};

static const struct nvkm_disp_chan_mthd
gv100_disp_wndw_mthd = {
	.name = "Window",
	.addr = 0x001000,
	.prev = 0x000800,
	.data = {
		{ "Global", 1, &gv100_disp_wndw_mthd_base },
		{}
	}
};

/* (Un)mask the window channel's interrupt for its head. */
static void
gv100_disp_wndw_intr(struct nvkm_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 mask = 0x00000001 << chan->head;
	const u32 data = en ? mask : 0;
	nvkm_mask(device, 0x611da4, mask, data);
}

static const struct nvkm_disp_chan_func
gv100_disp_wndw_func = {
	.push = nv50_disp_dmac_push,
	.init = gv100_disp_dmac_init,
	.fini = gv100_disp_dmac_fini,
	.intr = gv100_disp_wndw_intr,
	.user = gv100_disp_chan_user,
	.bind = gv100_disp_dmac_bind,
};

/* Window channels occupy chids 1..32 (1 + window index). */
const struct nvkm_disp_chan_user
gv100_disp_wndw = {
	.func = &gv100_disp_wndw_func,
	.ctrl = 1,
	.user = 1,
	.mthd = &gv100_disp_wndw_mthd,
};

/* Report the window population: mask from 0x610064, count from
 * 0x610074 bits 25:20.
 */
int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = nvkm_rd32(device, 0x610064);
	return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}

/* Poll (up to 2ms) for the cursor (PIO) channel to go idle.  Note the
 * narrower state field (0x00070000) than the DMA variant.
 */
static int
gv100_disp_curs_idle(struct nvkm_disp_chan *chan)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 soff = (chan->chid.ctrl - 1) * 0x04;
	nvkm_msec(device, 2000,
		u32 stat = nvkm_rd32(device, 0x610664 + soff);
		if ((stat & 0x00070000) == 0x00040000)
			return 0;
	);
	return -EBUSY;
}

/* (Un)mask the cursor channel's interrupt (high half of 0x611dac). */
static void
gv100_disp_curs_intr(struct nvkm_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 mask = 0x00010000 << chan->head;
	const u32 data = en ? mask : 0;
	nvkm_mask(device, 0x611dac, mask, data);
}

/* Stop a cursor channel: request shutdown, wait for idle, disable. */
static void
gv100_disp_curs_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 hoff = chan->chid.ctrl * 4;
	nvkm_mask(device, 0x6104e0 + hoff, 0x00000010, 0x00000010);
	gv100_disp_curs_idle(chan);
	nvkm_mask(device, 0x6104e0 + hoff, 0x00000001, 0x00000000);
}

/* Start a cursor channel (PIO — no pushbuffer setup needed). */
static int
gv100_disp_curs_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	nvkm_wr32(device, 0x6104e0 + chan->chid.ctrl * 4, 0x00000001);
	return gv100_disp_curs_idle(chan);
}

static const struct nvkm_disp_chan_func
gv100_disp_curs_func = {
	.init = gv100_disp_curs_init,
	.fini = gv100_disp_curs_fini,
	.intr = gv100_disp_curs_intr,
	.user = gv100_disp_chan_user,
};

/* Cursor channels occupy chids 73.. (73 + head). */
const struct nvkm_disp_chan_user
gv100_disp_curs = {
	.func = &gv100_disp_curs_func,
	.ctrl = 73,
	.user = 73,
};

/* Core channel method tables (global / per-SOR / per-window / per-head),
 * used for state dumps; the core user area lives at 0x680000.
 */
const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0200, 0x680200 },
		{ 0x0208, 0x680208 },
		{ 0x020c, 0x68020c },
		{ 0x0210, 0x680210 },
		{ 0x0214, 0x680214 },
		{ 0x0218, 0x680218 },
		{ 0x021c, 0x68021c },
		{}
	}
};

static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_sor = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0300, 0x680300 },
		{ 0x0304, 0x680304 },
		{ 0x0308, 0x680308 },
		{ 0x030c, 0x68030c },
		{}
	}
};

static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_wndw = {
	.mthd = 0x0080,
	.addr = 0x000080,
	.data = {
		{ 0x1000, 0x681000 },
		{ 0x1004, 0x681004 },
		{ 0x1008, 0x681008 },
		{ 0x100c, 0x68100c },
		{ 0x1010, 0x681010 },
		{}
	}
};

static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_head = {
	.mthd = 0x0400,
	.addr = 0x000400,
	.data = {
		{ 0x2000, 0x682000 },
		{ 0x2004, 0x682004 },
		{ 0x2008, 0x682008 },
		{ 0x200c, 0x68200c },
		{ 0x2014, 0x682014 },
		{ 0x2018, 0x682018 },
		{ 0x201c, 0x68201c },
		{ 0x2020, 0x682020 },
		{ 0x2028, 0x682028 },
		{ 0x202c, 0x68202c },
		{ 0x2030, 0x682030 },
		{ 0x2038, 0x682038 },
		{ 0x203c, 0x68203c },
		{ 0x2048, 0x682048 },
		{ 0x204c, 0x68204c },
		{ 0x2050, 0x682050 },
		{ 0x2054, 0x682054 },
		{ 0x2058, 0x682058 },
		{ 0x205c, 0x68205c },
		{ 0x2060, 0x682060 },
		{ 0x2064, 0x682064 },
		{ 0x2068, 0x682068 },
		{ 0x206c, 0x68206c },
		{ 0x2070, 0x682070 },
		{ 0x2074, 0x682074 },
		{ 0x2078, 0x682078 },
		{ 0x207c, 0x68207c },
		{ 0x2080, 0x682080 },
		{ 0x2088, 0x682088 },
		{ 0x2090, 0x682090 },
		{ 0x209c, 0x68209c },
		{ 0x20a0, 0x6820a0 },
		{ 0x20a4, 0x6820a4 },
		{ 0x20a8, 0x6820a8 },
		{ 0x20ac, 0x6820ac },
		{ 0x2180, 0x682180 },
		{ 0x2184, 0x682184 },
		{ 0x218c, 0x68218c },
		{ 0x2194, 0x682194 },
		{ 0x2198, 0x682198 },
		{ 0x219c, 0x68219c },
		{ 0x21a0, 0x6821a0 },
		{ 0x21a4, 0x6821a4 },
		{ 0x2214, 0x682214 },
		{ 0x2218, 0x682218 },
		{}
	}
};

static const struct nvkm_disp_chan_mthd
gv100_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = 0x008000,
	.data = {
		{ "Global", 1, &gv100_disp_core_mthd_base },
		{ "SOR", 4, &gv100_disp_core_mthd_sor },
		{ "WINDOW", 8, &gv100_disp_core_mthd_wndw },
		{ "HEAD", 4, &gv100_disp_core_mthd_head },
		{}
	}
};

/* Poll (up to 2ms) for the core channel's state field (bits 20:16 of
 * 0x610630) to reach 0xb — its idle state per this function's use.
 */
static int
gv100_disp_core_idle(struct nvkm_disp_chan *chan)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	nvkm_msec(device, 2000,
		u32 stat = nvkm_rd32(device, 0x610630);
		if ((stat & 0x001f0000) == 0x000b0000)
			return 0;
	);
	return -EBUSY;
}

/* The core channel's user area is a fixed 64KiB region at 0x680000. */
static u64
gv100_disp_core_user(struct nvkm_disp_chan *chan, u64 *psize)
{
	*psize = 0x10000;
	return 0x680000;
}

/* (Un)mask the core channel's interrupt (bit 0 of 0x611dac). */
static void
gv100_disp_core_intr(struct nvkm_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 mask = 0x00000001;
	const u32 data = en ? mask : 0;
	nvkm_mask(device, 0x611dac, mask, data);
}

/* Stop the core channel; mirrors gv100_disp_dmac_fini but with the
 * fixed chid-0 register/user offsets.
 */
static void
gv100_disp_core_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000000);
	gv100_disp_core_idle(chan);
	nvkm_mask(device, 0x6104e0, 0x00000002, 0x00000000);
	chan->suspend_put = nvkm_rd32(device, 0x680000);
}

/* Start the core channel; mirrors gv100_disp_dmac_init but with the
 * fixed chid-0 register/user offsets.
 */
static int
gv100_disp_core_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x610b24, lower_32_bits(chan->push));
	nvkm_wr32(device, 0x610b20, upper_32_bits(chan->push));
	nvkm_wr32(device, 0x610b28, 0x00000001);
	nvkm_wr32(device, 0x610b2c, 0x00000040);

	nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x680000, chan->suspend_put);
	nvkm_wr32(device, 0x6104e0, 0x00000013);
	return gv100_disp_core_idle(chan);
}

static const struct nvkm_disp_chan_func
gv100_disp_core_func = {
	.push = nv50_disp_dmac_push,
	.init = gv100_disp_core_init,
	.fini = gv100_disp_core_fini,
	.intr = gv100_disp_core_intr,
	.user = gv100_disp_core_user,
	.bind = gv100_disp_dmac_bind,
};

/* The core channel is always chid 0. */
const struct nvkm_disp_chan_user
gv100_disp_core = {
	.func = &gv100_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &gv100_disp_core_mthd,
};

#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)

/* Userspace-visible capabilities object; wraps the disp pointer so the
 * map() hook can locate the device's capability registers.
 */
struct gv100_disp_caps {
	struct nvkm_object object;
	struct nvkm_disp *disp;
};
/* Map the display capability register page (4KiB at BAR0 + 0x640000)
 * into userspace.
 */
static int
gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
		    enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct gv100_disp_caps *caps = gv100_disp_caps(object);
	struct nvkm_device *device = caps->disp->engine.subdev.device;
	*type = NVKM_OBJECT_MAP_IO;
	*addr = 0x640000 + device->func->resource_addr(device, 0);
	*size = 0x1000;
	return 0;
}

static const struct nvkm_object_func
gv100_disp_caps = {
	.map = gv100_disp_caps_map,
};

/* Constructor for the capabilities object (kfree'd via the generic
 * nvkm_object destructor).
 */
int
gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
		    struct nvkm_object **pobject)
{
	struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
	struct gv100_disp_caps *caps;

	if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &caps->object;

	nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
	caps->disp = disp;
	return 0;
}

/* Supervisor interrupt bottom half.  Runs the three-stage modeset
 * supervisor sequence (1: shutdown, 2: reconfigure+route, 3: enable)
 * for each head flagged in the per-head status registers at 0x6107ac,
 * then acks the supervisor via 0x6107a8.
 */
void
gv100_disp_super(struct work_struct *work)
{
	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 stat, mask[4];	/* NOTE(review): assumes <= 4 heads — confirm */

	mutex_lock(&disp->super.mutex);
	stat = nvkm_rd32(device, 0x6107a8);

	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super.pending), stat);
	list_for_each_entry(head, &disp->heads, head) {
		mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super.pending & 0x00000001) {
		/* Supervisor 1: dump core channel state, begin shutdown. */
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000002) {
		/* Supervisor 2: disable outputs, reroute, re-enable. */
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000004) {
		/* Supervisor 3: final enable. */
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Clear per-head status, then ack the supervisor interrupt. */
	list_for_each_entry(head, &disp->heads, head)
		nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);

	nvkm_wr32(device, 0x6107a8, 0x80000000);
	mutex_unlock(&disp->super.mutex);
}

/* Decode and log a per-channel exception (0x611020 + chid*12), dump
 * channel methods on a method-0x0200 (presumably PUSHBUFFER_ERR-class)
 * fault, then ack the exception.
 */
static void
gv100_disp_exception(struct nvkm_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
	u32 type = (stat & 0x00007000) >> 12;
	u32 mthd = (stat & 0x00000fff) << 2;
	const struct nvkm_enum *reason =
		nvkm_enum_find(nv50_disp_intr_error_type, type);

	/*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
	 * don't support those currently.
	 *
	 * CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
	 */
	if (chid <= 32) {
		u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
		u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
				   "mthd %04x data %08x code %08x\n",
			   chid, stat, type, reason ? reason->name : "",
			   mthd, data, code);
	} else {
		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
				   "mthd %04x\n",
			   chid, stat, type, reason ? reason->name : "", mthd);
	}

	if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
		switch (mthd) {
		case 0x0200:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
}

/* CTRL_DISP interrupt leaf: supervisor scheduling, error logging, and
 * AWAKEN notifications for core and window channels.
 */
static void
gv100_disp_intr_ctrl_disp(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611c30);

	if (stat & 0x00000007) {
		/* SUPERVISOR[1-3]: queue the bottom half, ack via 0x611860. */
		disp->super.pending = (stat & 0x00000007);
		queue_work(disp->super.wq, &disp->super.work);
		nvkm_wr32(device, 0x611860, disp->super.pending);
		stat &= ~0x00000007;
	}

	/*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
	 * ACK it, nor does RM appear to bother.
	 */
	if (stat & 0x00000008)
		stat &= ~0x00000008;

	if (stat & 0x00000080) {
		/* Read-only nvkm_mask used purely to fetch the error code. */
		u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
		nvkm_warn(subdev, "error %08x\n", error);
		stat &= ~0x00000080;
	}

	if (stat & 0x00000100) {
		unsigned long wndws = nvkm_rd32(device, 0x611858);
		unsigned long other = nvkm_rd32(device, 0x61185c);
		int wndw;

		nvkm_wr32(device, 0x611858, wndws);
		nvkm_wr32(device, 0x61185c, other);

		/* AWAKEN_OTHER_CORE. */
		if (other & 0x00000001)
			nv50_disp_chan_uevent_send(disp, 0);

		/* AWAKEN_WIN_CH(n). */
		for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
			nv50_disp_chan_uevent_send(disp, 1 + wndw);
		}
	}

	if (stat)
		nvkm_warn(subdev, "ctrl %08x\n", stat);
}

/* EXC_OTHER interrupt leaf: core channel (bit 0) and cursor channel
 * (bits 23:16) exceptions.
 */
static void
gv100_disp_intr_exc_other(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611854);
	unsigned long mask;
	int head;

	if (stat & 0x00000001) {
		nvkm_wr32(device, 0x611854, 0x00000001);
		gv100_disp_exception(disp, 0);
		stat &= ~0x00000001;
	}

	if ((mask = (stat & 0x00ff0000) >> 16)) {
		/* NOTE(review): iterates a per-head cursor mask but bounds
		 * it by disp->wndw.nr — confirm this is intentional.
		 */
		for_each_set_bit(head, &mask, disp->wndw.nr) {
			nvkm_wr32(device, 0x611854, 0x00010000 << head);
			gv100_disp_exception(disp, 73 + head);
			stat &= ~(0x00010000 << head);
		}
	}

	if (stat) {
		nvkm_warn(subdev, "exception %08x\n", stat);
		nvkm_wr32(device, 0x611854, stat);
	}
}

/* EXC_WINIM interrupt leaf: window immediate channel exceptions
 * (chids 33 + wndw).
 */
static void
gv100_disp_intr_exc_winim(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long stat = nvkm_rd32(device, 0x611850);
	int wndw;

	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
		nvkm_wr32(device, 0x611850, BIT(wndw));
		gv100_disp_exception(disp, 33 + wndw);
		stat &= ~BIT(wndw);
	}

	if (stat) {
		nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
		nvkm_wr32(device, 0x611850, stat);
	}
}

/* EXC_WIN interrupt leaf: window channel exceptions (chids 1 + wndw). */
static void
gv100_disp_intr_exc_win(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long stat = nvkm_rd32(device, 0x61184c);
	int wndw;

	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
		nvkm_wr32(device, 0x61184c, BIT(wndw));
		gv100_disp_exception(disp, 1 + wndw);
		stat &= ~BIT(wndw);
	}

	if (stat) {
		nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
		nvkm_wr32(device, 0x61184c, stat);
	}
}

/* HEAD_TIMING interrupt leaf: ack LAST_DATA/LOADV, dispatch vblank. */
static void
gv100_disp_intr_head_timing(struct nvkm_disp *disp, int head)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));

	/* LAST_DATA, LOADV. */
	if (stat & 0x00000003) {
		nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
		stat &= ~0x00000003;
	}

	if (stat & 0x00000004) {
		nvkm_disp_vblank(disp, head);
		nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
		stat &= ~0x00000004;
	}

	if (stat) {
		nvkm_warn(subdev, "head %08x\n", stat);
		nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
	}
}

/* Top-level display interrupt dispatch (0x611ec0): per-head timing in
 * bits 7:0, then the EXC_WIN/EXC_WINIM/EXC_OTHER/CTRL_DISP leaves.
 */
void
gv100_disp_intr(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611ec0);
	unsigned long mask;
	int head;

	if ((mask = (stat & 0x000000ff))) {
		for_each_set_bit(head, &mask, 8) {
			gv100_disp_intr_head_timing(disp, head);
			stat &= ~BIT(head);
		}
	}

	if (stat & 0x00000200) {
		gv100_disp_intr_exc_win(disp);
		stat &= ~0x00000200;
	}

	if (stat & 0x00000400) {
		gv100_disp_intr_exc_winim(disp);
		stat &= ~0x00000400;
	}

	if (stat & 0x00000800) {
		gv100_disp_intr_exc_other(disp);
		stat &= ~0x00000800;
	}

	if (stat & 0x00001000) {
		gv100_disp_intr_ctrl_disp(disp);
		stat &= ~0x00001000;
	}

	if (stat)
		nvkm_warn(subdev, "intr %08x\n", stat);
}

/* Teardown: mask the top-level CTRL_DISP interrupt enables. */
void
gv100_disp_fini(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_wr32(device, 0x611db0, 0x00000000);
}

/* Bring-up: claim display from VBIOS/firmware, mirror capability
 * registers into the user-visible 0x640000 window, point the hardware
 * at instance memory, and program interrupt masks/enables.
 */
static int
gv100_disp_init(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_head *head;
	int i, j;
	u32 tmp;

	/* Claim ownership of display. */
	if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
		nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* Lock pin capabilities. */
	tmp = nvkm_rd32(device, 0x610068);
	nvkm_wr32(device, 0x640008, tmp);

	/* SOR capabilities. */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
		nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
	}

	/* Head capabilities. */
	list_for_each_entry(head, &disp->heads, head) {
		const int id = head->id;

		/* RG. */
		tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
		nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);

		/* POSTCOMP. */
		for (j = 0; j < 6 * 4; j += 4) {
			tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
			nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
		}
	}

	/* Window capabilities. */
	for (i = 0; i < disp->wndw.nr; i++) {
		nvkm_mask(device, 0x640004, 1 << i, 1 << i);
		for (j = 0; j < 6 * 4; j += 4) {
			tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
			nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
		}
	}

	/* IHUB capabilities. */
	for (i = 0; i < 4; i++) {
		tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
		nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
	}

	nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);

	/* Setup instance memory. */
	switch (nvkm_memory_target(disp->inst->memory)) {
	case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
	case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
	case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
	default:
		/* NOTE(review): tmp retains its previous value here —
		 * assumed unreachable for supported memory targets.
		 */
		break;
	}
	nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
	nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);

	/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
	nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
	nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */

	/* EXC_OTHER: CURSn, CORE. */
	nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
				    0x00000001); /* MSK. */
	nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */

	/* EXC_WINIM. */
	nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
	nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */

	/* EXC_WIN. */
	nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
	nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */

	/* HEAD_TIMING(n): VBLANK. */
	list_for_each_entry(head, &disp->heads, head) {
		const u32 hoff = head->id * 4;
		nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
		nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
	}

	/* OR. */
	nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
	nvkm_wr32(device, 0x611db4, 0x00000000); /* EN.
*/ 1204 return 0; 1205 } 1206 1207 static const struct nvkm_disp_func 1208 gv100_disp = { 1209 .oneinit = nv50_disp_oneinit, 1210 .init = gv100_disp_init, 1211 .fini = gv100_disp_fini, 1212 .intr = gv100_disp_intr, 1213 .super = gv100_disp_super, 1214 .uevent = &gv100_disp_chan_uevent, 1215 .wndw = { .cnt = gv100_disp_wndw_cnt }, 1216 .head = { .cnt = gv100_head_cnt, .new = gv100_head_new }, 1217 .sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new }, 1218 .ramht_size = 0x2000, 1219 .root = { 0, 0,GV100_DISP }, 1220 .user = { 1221 {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new }, 1222 {{ 0, 0,GV100_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs }, 1223 {{ 0, 0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm }, 1224 {{ 0, 0,GV100_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core }, 1225 {{ 0, 0,GV100_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw }, 1226 {} 1227 }, 1228 }; 1229 1230 int 1231 gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, 1232 struct nvkm_disp **pdisp) 1233 { 1234 return nvkm_disp_new_(&gv100_disp, device, type, inst, pdisp); 1235 } 1236