/*
 * Copyright 1993-2003 NVIDIA, Corporation.  All rights reserved.
 *
 * NOTICE TO USER: The source code is copyrighted under U.S. and
 * international laws.  Users and possessors of this source code are
 * hereby granted a nonexclusive, royalty-free copyright license to
 * use this code in individual and commercial software.
 *
 * Any use of this source code must include, in the user documenta-
 * tion and internal comments to the code, notices to the end user
 * as follows:
 *
 * Copyright 1993-2003 NVIDIA, Corporation.  All rights reserved.
 *
 * NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY
 * OF THIS SOURCE CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS"
 * WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.  NVIDIA, CORPOR-
 * ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE-
 * MENT, AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT SHALL
 * NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI-
 * DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE-
 * SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users.  This source code is a "commercial
 * item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995),
 * consisting of "commercial computer software" and "commercial
 * computer software documentation," as such terms are used in
 * 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern-
 * ment only as a commercial end item.  Consistent with 48 C.F.R.
 * 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995),
 * all U.S. Government End Users acquire the source code with only
 * those rights set forth herein.
 */

/*
 * GPL Licensing Note - According to Mark Vojkovich, author of the Xorg/
 * XFree86 'nv' driver, this source code is provided under MIT-style licensing
 * where the source code is provided "as is" without warranty of any kind.
 * The only usage restriction is for the copyright notices to be retained
 * whenever code is used.
 *
 * Antonino Daplas <adaplas@pol.net> 2005-03-11
 */

#include <linux/fb.h>
#include "nv_type.h"
#include "nv_proto.h"
#include "nv_dma.h"
#include "nv_local.h"

/*
 * There is a HW race condition with videoram command buffers.  You
 * can't jump to the location of your put offset.  We write put at the
 * jump offset + SKIPS dwords, with noop padding in between, to solve
 * this problem.
 */
#define SKIPS 8
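
/*
 * Rough sketch of the command buffer layout that NVResetGraphics()
 * sets up just past the usable framebuffer memory
 * (FbStart + FbUsableSize); offsets are in dwords:
 *
 *   dmaBase[0]     .. dmaBase[SKIPS - 1]   noop padding
 *   dmaBase[SKIPS] .. dmaBase[dmaMax]      object bindings + 2D commands
 *
 * When NVDmaWait() runs out of room it emits 0x20000000 (a FIFO jump
 * back to offset 0, i.e. the start of the buffer) and then parks PUT
 * at SKIPS, so the hardware never has to begin fetching at the exact
 * offset that PUT points to.
 */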

static const int NVCopyROP[16] = {
        0xCC,                   /* copy   */
        0x55                    /* invert */
};

static const int NVCopyROP_PM[16] = {
        0xCA,                   /* copy   */
        0x5A,                   /* invert */
};

/*
 * The engine stopped responding: disable acceleration so that all
 * further drawing falls back to the software (cfb_*) paths.
 */
static inline void nvidiafb_safe_mode(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        touch_softlockup_watchdog();
        info->pixmap.scan_align = 1;
        par->lockup = 1;
}

/* Wait for the FIFO to fetch everything we have submitted (GET == PUT). */
static inline void NVFlush(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        int count = 1000000000;

        while (--count && READ_GET(par) != par->dmaPut) ;

        if (!count) {
                printk("nvidiafb: DMA Flush lockup\n");
                nvidiafb_safe_mode(info);
        }
}

/* Wait for the graphics engine to go idle. */
static inline void NVSync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        int count = 1000000000;

        while (--count && NV_RD32(par->PGRAPH, 0x0700)) ;

        if (!count) {
                printk("nvidiafb: DMA Sync lockup\n");
                nvidiafb_safe_mode(info);
        }
}

/* Advance PUT so the hardware starts fetching what we have written. */
static void NVDmaKickoff(struct nvidia_par *par)
{
        if (par->dmaCurrent != par->dmaPut) {
                par->dmaPut = par->dmaCurrent;
                WRITE_PUT(par, par->dmaPut);
        }
}

/*
 * Wait until at least 'size' free dwords are available in the command
 * buffer, wrapping back to the start (jump + SKIPS padding) when the
 * end of the buffer is reached.
 */
static void NVDmaWait(struct fb_info *info, int size)
{
        struct nvidia_par *par = info->par;
        int dmaGet;
        int count = 1000000000, cnt;
        size++;

        while (par->dmaFree < size && --count && !par->lockup) {
                dmaGet = READ_GET(par);

                if (par->dmaPut >= dmaGet) {
                        par->dmaFree = par->dmaMax - par->dmaCurrent;
                        if (par->dmaFree < size) {
                                NVDmaNext(par, 0x20000000);
                                if (dmaGet <= SKIPS) {
                                        if (par->dmaPut <= SKIPS)
                                                WRITE_PUT(par, SKIPS + 1);
                                        cnt = 1000000000;
                                        do {
                                                dmaGet = READ_GET(par);
                                        } while (--cnt && dmaGet <= SKIPS);
                                        if (!cnt) {
                                                printk("DMA Get lockup\n");
                                                par->lockup = 1;
                                        }
                                }
                                WRITE_PUT(par, SKIPS);
                                par->dmaCurrent = par->dmaPut = SKIPS;
                                par->dmaFree = dmaGet - (SKIPS + 1);
                        }
                } else
                        par->dmaFree = dmaGet - par->dmaCurrent - 1;
        }

        if (!count) {
                printk("nvidiafb: DMA Wait Lockup\n");
                nvidiafb_safe_mode(info);
        }
}

static void NVSetPattern(struct fb_info *info, u32 clr0, u32 clr1,
                         u32 pat0, u32 pat1)
{
        struct nvidia_par *par = info->par;

        NVDmaStart(info, par, PATTERN_COLOR_0, 4);
        NVDmaNext(par, clr0);
        NVDmaNext(par, clr1);
        NVDmaNext(par, pat0);
        NVDmaNext(par, pat1);
}

/*
 * Program the raster operation.  A partial planemask is implemented by
 * loading the mask into the pattern and switching to the pattern-masked
 * ROPs (NVCopyROP_PM).
 */
static void NVSetRopSolid(struct fb_info *info, u32 rop, u32 planemask)
{
        struct nvidia_par *par = info->par;

        if (planemask != ~0) {
                NVSetPattern(info, 0, planemask, ~0, ~0);
                if (par->currentRop != (rop + 32)) {
                        NVDmaStart(info, par, ROP_SET, 1);
                        NVDmaNext(par, NVCopyROP_PM[rop]);
                        par->currentRop = rop + 32;
                }
        } else if (par->currentRop != rop) {
                if (par->currentRop >= 16)
                        NVSetPattern(info, ~0, ~0, ~0, ~0);
                NVDmaStart(info, par, ROP_SET, 1);
                NVDmaNext(par, NVCopyROP[rop]);
                par->currentRop = rop;
        }
}

static void NVSetClippingRectangle(struct fb_info *info, int x1, int y1,
                                   int x2, int y2)
{
        struct nvidia_par *par = info->par;
        int h = y2 - y1 + 1;
        int w = x2 - x1 + 1;

        NVDmaStart(info, par, CLIP_POINT, 2);
        NVDmaNext(par, (y1 << 16) | x1);
        NVDmaNext(par, (h << 16) | w);
}

/*
 * Reinitialize the DMA command buffer and reprogram the 2D engine
 * state (surface format, pattern, ROP, clip) for the current mode.
 */
void NVResetGraphics(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        u32 surfaceFormat, patternFormat, rectFormat, lineFormat;
        int pitch, i;

        pitch = info->fix.line_length;

        par->dmaBase = (u32 __iomem *) (&par->FbStart[par->FbUsableSize]);

        for (i = 0; i < SKIPS; i++)
                NV_WR32(&par->dmaBase[i], 0, 0x00000000);

        NV_WR32(&par->dmaBase[0x0 + SKIPS], 0, 0x00040000);
        NV_WR32(&par->dmaBase[0x1 + SKIPS], 0, 0x80000010);
        NV_WR32(&par->dmaBase[0x2 + SKIPS], 0, 0x00042000);
        NV_WR32(&par->dmaBase[0x3 + SKIPS], 0, 0x80000011);
        NV_WR32(&par->dmaBase[0x4 + SKIPS], 0, 0x00044000);
        NV_WR32(&par->dmaBase[0x5 + SKIPS], 0, 0x80000012);
        NV_WR32(&par->dmaBase[0x6 + SKIPS], 0, 0x00046000);
        NV_WR32(&par->dmaBase[0x7 + SKIPS], 0, 0x80000013);
        NV_WR32(&par->dmaBase[0x8 + SKIPS], 0, 0x00048000);
        NV_WR32(&par->dmaBase[0x9 + SKIPS], 0, 0x80000014);
        NV_WR32(&par->dmaBase[0xA + SKIPS], 0, 0x0004A000);
        NV_WR32(&par->dmaBase[0xB + SKIPS], 0, 0x80000015);
        NV_WR32(&par->dmaBase[0xC + SKIPS], 0, 0x0004C000);
        NV_WR32(&par->dmaBase[0xD + SKIPS], 0, 0x80000016);
        NV_WR32(&par->dmaBase[0xE + SKIPS], 0, 0x0004E000);
        NV_WR32(&par->dmaBase[0xF + SKIPS], 0, 0x80000017);

        par->dmaPut = 0;
        par->dmaCurrent = 16 + SKIPS;
        par->dmaMax = 8191;
        par->dmaFree = par->dmaMax - par->dmaCurrent;

        switch (info->var.bits_per_pixel) {
        case 32:
        case 24:
                surfaceFormat = SURFACE_FORMAT_DEPTH24;
                patternFormat = PATTERN_FORMAT_DEPTH24;
                rectFormat = RECT_FORMAT_DEPTH24;
                lineFormat = LINE_FORMAT_DEPTH24;
                break;
        case 16:
                surfaceFormat = SURFACE_FORMAT_DEPTH16;
                patternFormat = PATTERN_FORMAT_DEPTH16;
                rectFormat = RECT_FORMAT_DEPTH16;
                lineFormat = LINE_FORMAT_DEPTH16;
                break;
        default:
                surfaceFormat = SURFACE_FORMAT_DEPTH8;
                patternFormat = PATTERN_FORMAT_DEPTH8;
                rectFormat = RECT_FORMAT_DEPTH8;
                lineFormat = LINE_FORMAT_DEPTH8;
                break;
        }

        NVDmaStart(info, par, SURFACE_FORMAT, 4);
        NVDmaNext(par, surfaceFormat);
        NVDmaNext(par, pitch | (pitch << 16));
        NVDmaNext(par, 0);
        NVDmaNext(par, 0);

        NVDmaStart(info, par, PATTERN_FORMAT, 1);
        NVDmaNext(par, patternFormat);

        NVDmaStart(info, par, RECT_FORMAT, 1);
        NVDmaNext(par, rectFormat);

        NVDmaStart(info, par, LINE_FORMAT, 1);
        NVDmaNext(par, lineFormat);

        par->currentRop = ~0;   /* set to something invalid */
        NVSetRopSolid(info, ROP_COPY, ~0);

        NVSetClippingRectangle(info, 0, 0, info->var.xres_virtual,
                               info->var.yres_virtual);

        NVDmaKickoff(par);
}

/* fbdev .fb_sync hook: wait for all pending accelerated operations. */
int nvidiafb_sync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return 0;

        if (!par->lockup)
                NVFlush(info);

        if (!par->lockup)
                NVSync(info);

        return 0;
}

/* fbdev .fb_copyarea hook: screen-to-screen blit. */
void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (par->lockup) {
                cfb_copyarea(info, region);
                return;
        }

        NVDmaStart(info, par, BLIT_POINT_SRC, 3);
        NVDmaNext(par, (region->sy << 16) | region->sx);
        NVDmaNext(par, (region->dy << 16) | region->dx);
        NVDmaNext(par, (region->height << 16) | region->width);

        NVDmaKickoff(par);
}

/* fbdev .fb_fillrect hook: solid rectangle fill. */
void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        struct nvidia_par *par = info->par;
        u32 color;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (par->lockup) {
                cfb_fillrect(info, rect);
                return;
        }

        if (info->var.bits_per_pixel == 8)
                color = rect->color;
        else
                color = ((u32 *) info->pseudo_palette)[rect->color];

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(info, rect->rop, ~0);

        NVDmaStart(info, par, RECT_SOLID_COLOR, 1);
        NVDmaNext(par, color);

        NVDmaStart(info, par, RECT_SOLID_RECTS(0), 2);
        NVDmaNext(par, (rect->dx << 16) | rect->dy);
        NVDmaNext(par, (rect->width << 16) | rect->height);

        NVDmaKickoff(par);

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(info, ROP_COPY, ~0);
}

/*
 * Expand a monochrome (1 bpp) image to foreground/background colors
 * with the hardware color-expansion object, feeding the bitmap data
 * through the command buffer in chunks.
 */
static void nvidiafb_mono_color_expand(struct fb_info *info,
                                       const struct fb_image *image)
{
        struct nvidia_par *par = info->par;
        u32 fg, bg, mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        u32 dsize, width, *data = (u32 *) image->data, tmp;
        int j, k = 0;

        width = (image->width + 31) & ~31;
        dsize = (width * image->height) >> 5;

        if (info->var.bits_per_pixel == 8) {
                fg = image->fg_color | mask;
                bg = image->bg_color | mask;
        } else {
                fg = ((u32 *) info->pseudo_palette)[image->fg_color] | mask;
                bg = ((u32 *) info->pseudo_palette)[image->bg_color] | mask;
        }

        NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_CLIP, 7);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));
        NVDmaNext(par, ((image->dy + image->height) << 16) |
                  ((image->dx + image->width) & 0xffff));
        NVDmaNext(par, bg);
        NVDmaNext(par, fg);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));

        while (dsize >= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS) {
                NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0),
                           RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS);

                for (j = RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }

                dsize -= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS;
        }

        if (dsize) {
                NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0), dsize);

                for (j = dsize; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }
        }

        NVDmaKickoff(par);
}

/*
 * fbdev .fb_imageblit hook: use the hardware for monochrome expansion,
 * fall back to cfb_imageblit() for everything else.
 */
void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (image->depth == 1 && !par->lockup)
                nvidiafb_mono_color_expand(info, image);
        else
                cfb_imageblit(info, image);
}
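
/*
 * The nvidiafb_* drawing hooks above are wired into the main driver's
 * struct fb_ops (nvidia.c), roughly along these lines (sketch of the
 * relevant fields only; the real initializer lives in nvidia.c):
 *
 *      .fb_fillrect  = nvidiafb_fillrect,
 *      .fb_copyarea  = nvidiafb_copyarea,
 *      .fb_imageblit = nvidiafb_imageblit,
 *      .fb_sync      = nvidiafb_sync,
 *
 * while NVResetGraphics() is called by the driver when it programs a
 * video mode.  The drawing hooks degrade to the generic cfb_* software
 * paths once par->lockup has been set by nvidiafb_safe_mode().
 */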