/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif

#ifdef CONFIG_PGSTE
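/*
 * Guest address space (gmap) support: a gmap provides a second,
 * guest-visible address space on top of a normal process mm.  Each gmap
 * owns its own region-1 table; gmap_map_segment() makes 1 MB segments of
 * the parent mm visible at chosen guest addresses, and gmap_fault() /
 * __gmap_translate() convert guest addresses back to parent addresses.
 *
 * Rough, illustrative call sequence (not taken from any particular
 * caller; the addresses are made up and must be PMD_SIZE aligned, and
 * error handling is omitted):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, 0x20000000, 0x0, 0x100000);
 *	gmap_enable(gmap);
 *	...			(run the guest)
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */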
/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);
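/*
 * Note: gmap_enable()/gmap_disable() only record the currently active
 * guest address space in the lowcore.  The low-level fault handling
 * code is expected to consult S390_lowcore.gmap to decide whether a
 * fault has to be resolved in the guest address space rather than in
 * the normal process address space.
 */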
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
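/*
 * gmap_table_walk - walk the gmap region tables down to the segment entry
 *
 * The guest address is split at the boundaries the hardware uses for
 * dynamic address translation: bits 53, 42, 31 and 20 select the entry
 * in the region-1, region-2, region-3 and segment table respectively,
 * each table holding 2048 entries (hence the 0x7ff masks).  The
 * open-coded walks in gmap_map_segment(), gmap_unmap_segment() and
 * gmap_discard() follow the same pattern.  Returns a pointer to the
 * segment table entry or ERR_PTR(-EFAULT) if an intermediate table is
 * missing.
 */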
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}
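/*
 * gmap_disconnect_pgtable - remove all references gmaps hold on a host
 * page table before that page table goes away.  Every linked gmap
 * segment table entry is reset to an invalid, protected entry that
 * still carries the host vmaddr, the rmap structures are freed and, if
 * anything was disconnected, the TLB is flushed.
 */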
static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
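/*
 * Invalidation notification: a user of the gmap (e.g. KVM) can register
 * a gmap_notifier and then mark guest pages with gmap_ipte_notify().
 * When such a marked pte is invalidated, gmap_do_ipte_notify() invokes
 * every registered notifier with the gmap and the guest address that
 * was backed by that pte.
 */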
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	/*
	 * Offset of the pte within its 256 entry (2K) page table, scaled
	 * up to the guest address offset it represents (one page per pte).
	 */
	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}
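/*
 * Page tables for mms with extended pgstes (KVM guests) use a full 4K
 * page: the lower 2K hold the 256 ptes, the upper 2K hold the
 * corresponding pgstes.  Such pages are marked by page->_mapcount == 0,
 * while ordinary 2K page table fragments keep an allocation bitmask in
 * _mapcount (see page_table_alloc() below).
 */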
static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

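/*
 * RCU-deferred variant of page_table_free() used by the mmu_gather code:
 * the fragment bits (shifted left by 4) or, for pgste tables, FRAG_MASK
 * are encoded into the low bits of the table pointer handed to
 * tlb_remove_table().  __tlb_remove_table() decodes them again once the
 * RCU grace period (or the IPI fallback) guarantees that no concurrent
 * software walker can still see the table.
 */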
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

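/*
 * The page_table_realloc_*() walkers below replace every existing 2K
 * page table fragment of the mm with a full 4K pgste page table: the
 * ptes are copied over, the new table is hooked into the pmd and the
 * old fragment is freed via RCU so that concurrent table walkers stay
 * safe.  Used by s390_enable_sie() when an mm is switched to pgstes.
 */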
static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new) {
			mm->context.has_pgste = 0;
			continue;
		}
		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				   struct mm_struct *mm, pgd_t *pgd,
				   unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return addr;
}

static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
			       unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/*
 * switch on pgstes for the current userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have switched amode? If not, we cannot do SIE */
	if (s390_user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	mm->context.has_pgste = 1;
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	page_table_realloc(&tlb, mm, 0, TASK_SIZE);
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
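/*
 * Transparent huge page helpers.  pgtable_trans_huge_deposit() and
 * pgtable_trans_huge_withdraw() keep the pre-allocated page tables on a
 * list that is threaded through the pte area of the deposited tables
 * themselves; the first two pte slots are reset to invalid on withdraw.
 */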
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */