--- mem_helper.c (ccdf741c48db62319539a31bb5ae73a67316b295)
+++ mem_helper.c (8577f354792414a2b24ef72c64730ed0f6bb071e)
 /*
  * HPPA memory access helper routines
  *
  * Copyright (c) 2017 Helge Deller
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
--- 330 unchanged lines hidden ---
      * because we record the large page here in the hppa tlb.
      */
     tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);
     return true;
 }
 
 /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
-void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
 {
     HPPATLBEntry *ent;
 
     /* Zap any old entries covering ADDR. */
     addr &= TARGET_PAGE_MASK;
     hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
 
     ent = env->tlb_partial;
--- 4 unchanged lines hidden ---
 
     /* Note that ent->entry_valid == 0 already. */
     ent->itree.start = addr;
     ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
     ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
     trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
 }
 
-static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
+static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
+                                 target_ureg reg)
 {
     ent->access_id = extract32(reg, 1, 18);
     ent->u = extract32(reg, 19, 1);
     ent->ar_pl2 = extract32(reg, 20, 2);
     ent->ar_pl1 = extract32(reg, 22, 2);
     ent->ar_type = extract32(reg, 24, 3);
     ent->b = extract32(reg, 27, 1);
     ent->d = extract32(reg, 28, 1);
     ent->t = extract32(reg, 29, 1);
     ent->entry_valid = 1;
 
     interval_tree_insert(&ent->itree, &env->tlb_root);
     trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                          ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
 }
 
 /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
-void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
 {
     HPPATLBEntry *ent = env->tlb_partial;
 
     if (ent) {
         env->tlb_partial = NULL;
         if (ent->itree.start <= addr && addr <= ent->itree.last) {
-            set_access_bits(env, ent, reg);
+            set_access_bits_pa11(env, ent, reg);
             return;
         }
     }
     qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
 }
 
+static void itlbt_pa20(CPUHPPAState *env, target_ureg r1,
+                       target_ureg r2, vaddr va_b)
+{
+    HPPATLBEntry *ent;
+    vaddr va_e;
+    uint64_t va_size;
+    int mask_shift;
+
+    mask_shift = 2 * (r1 & 0xf);
+    va_size = TARGET_PAGE_SIZE << mask_shift;
+    va_b &= -va_size;
+    va_e = va_b + va_size - 1;
+
+    hppa_flush_tlb_range(env, va_b, va_e);
+    ent = hppa_alloc_tlb_ent(env);
+
+    ent->itree.start = va_b;
+    ent->itree.last = va_e;
+    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
+    ent->t = extract64(r2, 61, 1);
+    ent->d = extract64(r2, 60, 1);
+    ent->b = extract64(r2, 59, 1);
+    ent->ar_type = extract64(r2, 56, 3);
+    ent->ar_pl1 = extract64(r2, 54, 2);
+    ent->ar_pl2 = extract64(r2, 52, 2);
+    ent->u = extract64(r2, 51, 1);
+    /* o = bit 50 */
+    /* p = bit 49 */
+    ent->access_id = extract64(r2, 1, 31);
+    ent->entry_valid = 1;
+
+    interval_tree_insert(&ent->itree, &env->tlb_root);
+    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
+    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
+                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
+                         ent->b, ent->d, ent->t);
+}
+
+void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ureg r1, target_ureg r2)
+{
+    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
+    itlbt_pa20(env, r1, r2, va_b);
+}
+
+void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ureg r1, target_ureg r2)
+{
+    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
+    itlbt_pa20(env, r1, r2, va_b);
+}
+
 /* Purge (Insn/Data) TLB. This is explicitly page-based, and is
    synchronous across all processors. */
 static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUHPPAState *env = cpu_env(cpu);
     target_ulong addr = (target_ulong) data.target_ptr;
 
     hppa_flush_tlb_range(env, addr, addr);
--- 150 unchanged lines hidden ---
 
         /* Force flush of possibly existing BTLB entry. */
         hppa_flush_tlb_ent(env, btlb, true);
 
         /* Create new BTLB entry */
         btlb->itree.start = virt_page << TARGET_PAGE_BITS;
         btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
         btlb->pa = phys_page << TARGET_PAGE_BITS;
-        set_access_bits(env, btlb, env->gr[20]);
+        set_access_bits_pa11(env, btlb, env->gr[20]);
         btlb->t = 0;
         btlb->d = 1;
     } else {
         env->gr[28] = -10; /* invalid argument */
     }
     break;
 case 2:
     /* Purge BTLB entry */
--- 23 unchanged lines hidden ---
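
A note on the new PA 2.0 path: itlbt_pa20() derives its mapping size from the low four bits of r1 (each increment quadruples the number of base pages) and decodes the protection/access fields straight out of r2, replacing the two-step ITLBA/ITLBP handshake that remains for PA 1.1. The standalone sketch below reproduces that arithmetic for experimentation; the 4 KiB page size, the local extract64() stand-in, and the sample operand values are assumptions for illustration, not QEMU code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12                         /* assumed 4 KiB base page */
#define PAGE_SIZE ((uint64_t)1 << PAGE_BITS)

/* Local stand-in mirroring the contract of QEMU's extract64(). */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    uint64_t r1 = 0x3;                       /* sample operand: low 4 bits = 3 */
    uint64_t r2 = 0x3123456789abcdeeULL;     /* sample protection word */
    uint64_t va_b = 0x12345678ULL;           /* sample virtual address */

    /* Size selection, as in itlbt_pa20(): 4^(r1 & 0xf) base pages. */
    int mask_shift = 2 * (int)(r1 & 0xf);
    uint64_t va_size = PAGE_SIZE << mask_shift;

    /* Align the start down to the mapping size; the end is inclusive. */
    va_b &= -va_size;
    uint64_t va_e = va_b + va_size - 1;

    printf("va_size 0x%" PRIx64 ", range [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
           va_size, va_b, va_e);

    /* Field decode at the same r2 offsets itlbt_pa20() uses. */
    printf("t=%" PRIu64 " d=%" PRIu64 " b=%" PRIu64 " ar_type=%" PRIu64 "\n",
           extract64(r2, 61, 1), extract64(r2, 60, 1),
           extract64(r2, 59, 1), extract64(r2, 56, 3));
    printf("ar_pl1=%" PRIu64 " ar_pl2=%" PRIu64 " u=%" PRIu64
           " access_id=0x%" PRIx64 "\n",
           extract64(r2, 54, 2), extract64(r2, 52, 2),
           extract64(r2, 51, 1), extract64(r2, 1, 31));
    return 0;
}

With the sample r1 above (low bits 0x3), the sketch prints a 256 KiB mapping whose start has been aligned down to a 256 KiB boundary, matching the range that itlbt_pa20() would flush and install in the hppa TLB.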