Lines matching refs: pmbe

145 struct pmb_entry *pmbe, *iter; in pmb_mapping_exists() local
151 pmbe = &pmb_entry_list[i]; in pmb_mapping_exists()
156 if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) in pmb_mapping_exists()
158 if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size))) in pmb_mapping_exists()
164 if (size <= pmbe->size) { in pmb_mapping_exists()
169 span = pmbe->size; in pmb_mapping_exists()
175 for (iter = pmbe->link; iter; iter = iter->link) in pmb_mapping_exists()
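
The matches above trace pmb_mapping_exists(): it looks for an entry whose vpn/ppn range contains the requested virtual and physical start, then, if the entry alone is too small, walks the ->link chain summing spans until the requested size is covered. A minimal userspace model of that containment-plus-chain check, reconstructed from lines 156-175 (field names follow the references; the surrounding loop and locking are omitted):

#include <stdbool.h>
#include <stdio.h>

struct pmb_entry {
	unsigned long vpn, ppn, size;
	struct pmb_entry *link;	/* next entry of a multi-entry mapping */
};

static bool mapping_exists(struct pmb_entry *pmbe,
			   unsigned long vaddr, unsigned long phys,
			   unsigned long size)
{
	struct pmb_entry *iter;
	unsigned long span;

	/* both the virtual and physical start must fall inside the entry */
	if (vaddr < pmbe->vpn || vaddr >= pmbe->vpn + pmbe->size)
		return false;
	if (phys < pmbe->ppn || phys >= pmbe->ppn + pmbe->size)
		return false;

	/* a single entry covers the request */
	if (size <= pmbe->size)
		return true;

	/* otherwise linked entries must add up to at least 'size' */
	span = pmbe->size;
	for (iter = pmbe->link; iter; iter = iter->link)
		span += iter->size;

	return span >= size;
}

int main(void)
{
	struct pmb_entry second = { .vpn = 0x84000000, .ppn = 0x44000000,
				    .size = 64UL << 20 };
	struct pmb_entry first  = { .vpn = 0x80000000, .ppn = 0x40000000,
				    .size = 64UL << 20, .link = &second };

	/* two 64MB entries satisfy a 128MB request: prints 1 */
	printf("%d\n", mapping_exists(&first, 0x80000000, 0x40000000,
				      128UL << 20));
	return 0;
}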
239 struct pmb_entry *pmbe; in pmb_alloc() local
263 pmbe = &pmb_entry_list[pos]; in pmb_alloc()
265 memset(pmbe, 0, sizeof(struct pmb_entry)); in pmb_alloc()
267 raw_spin_lock_init(&pmbe->lock); in pmb_alloc()
269 pmbe->vpn = vpn; in pmb_alloc()
270 pmbe->ppn = ppn; in pmb_alloc()
271 pmbe->flags = flags; in pmb_alloc()
272 pmbe->entry = pos; in pmb_alloc()
274 return pmbe; in pmb_alloc()
281 static void pmb_free(struct pmb_entry *pmbe) in pmb_free() argument
283 __clear_bit(pmbe->entry, pmb_map); in pmb_free()
285 pmbe->entry = PMB_NO_ENTRY; in pmb_free()
286 pmbe->link = NULL; in pmb_free()
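
Together these matches show the allocator pair: pmb_alloc() claims a free slot (the __clear_bit() on pmb_map at line 283 implies a bitmap of hardware slots), zeroes the pmb_entry_list slot, and records vpn/ppn/flags plus the slot index in ->entry; pmb_free() is the mirror image, clearing the bit and resetting ->entry and ->link. A compilable sketch of that bitmap-backed allocator; NR_PMB_ENTRIES and the error convention are illustrative, and the kernel version also takes a fixed-slot hint as the pmb_alloc(vpn, ppn, flags, i) call at line 624 shows:

#include <string.h>

#define NR_PMB_ENTRIES	16	/* illustrative slot count */
#define PMB_NO_ENTRY	(-1)

struct pmb_entry {
	unsigned long vpn, ppn, flags, size;
	int entry;
	struct pmb_entry *link;
};

static unsigned long pmb_map;	/* one bit per hardware slot */
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];

static struct pmb_entry *pmb_alloc_model(unsigned long vpn, unsigned long ppn,
					 unsigned long flags)
{
	int pos;

	/* first free slot in the map */
	for (pos = 0; pos < NR_PMB_ENTRIES; pos++)
		if (!(pmb_map & (1UL << pos)))
			break;
	if (pos == NR_PMB_ENTRIES)
		return 0;	/* the kernel returns an ERR_PTR() here,
				 * hence the IS_ERR()/PTR_ERR() at 365-367 */

	pmb_map |= 1UL << pos;

	struct pmb_entry *pmbe = &pmb_entry_list[pos];
	memset(pmbe, 0, sizeof(*pmbe));
	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;
	return pmbe;
}

static void pmb_free_model(struct pmb_entry *pmbe)
{
	pmb_map &= ~(1UL << pmbe->entry);
	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = 0;
}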
292 static void __set_pmb_entry(struct pmb_entry *pmbe) in __set_pmb_entry() argument
296 addr = mk_pmb_addr(pmbe->entry); in __set_pmb_entry()
297 data = mk_pmb_data(pmbe->entry); in __set_pmb_entry()
302 __raw_writel(pmbe->vpn | PMB_V, addr); in __set_pmb_entry()
303 __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data); in __set_pmb_entry()
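
__set_pmb_entry() programs one hardware slot through two memory-mapped arrays: mk_pmb_addr()/mk_pmb_data() turn the slot index into the per-entry register addresses, then the address array receives vpn plus the valid bit and the data array receives ppn, the attribute flags, and the valid bit again. A sketch of that encoding; PMB_ADDR, PMB_DATA, PMB_E_SHIFT, and PMB_V are illustrative placeholders, not authoritative SH-4A values:

#include <stdint.h>

#define PMB_ADDR	0xf6100000UL	/* address-array base (illustrative) */
#define PMB_DATA	0xf7100000UL	/* data-array base (illustrative) */
#define PMB_E_SHIFT	8
#define PMB_V		0x00000100UL	/* valid bit (illustrative) */

static unsigned long mk_pmb_addr(int entry)
{
	return PMB_ADDR + ((unsigned long)entry << PMB_E_SHIFT);
}

static unsigned long mk_pmb_data(int entry)
{
	return PMB_DATA + ((unsigned long)entry << PMB_E_SHIFT);
}

struct pmb_regs {
	unsigned long addr_reg, data_reg;	/* MMIO targets */
	uint32_t addr_val, data_val;		/* values written */
};

/* What lines 296-303 compute, as plain values instead of MMIO stores. */
static struct pmb_regs encode_pmb_entry(unsigned long vpn, unsigned long ppn,
					unsigned long flags, int entry)
{
	struct pmb_regs r;

	r.addr_reg = mk_pmb_addr(entry);	/* first __raw_writel() target */
	r.data_reg = mk_pmb_data(entry);	/* second __raw_writel() target */
	r.addr_val = vpn | PMB_V;		/* line 302 */
	r.data_val = ppn | flags | PMB_V;	/* line 303 */
	return r;
}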
308 static void __clear_pmb_entry(struct pmb_entry *pmbe) in __clear_pmb_entry() argument
313 addr = mk_pmb_addr(pmbe->entry); in __clear_pmb_entry()
314 data = mk_pmb_data(pmbe->entry); in __clear_pmb_entry()
325 static void set_pmb_entry(struct pmb_entry *pmbe) in set_pmb_entry() argument
329 raw_spin_lock_irqsave(&pmbe->lock, flags); in set_pmb_entry()
330 __set_pmb_entry(pmbe); in set_pmb_entry()
331 raw_spin_unlock_irqrestore(&pmbe->lock, flags); in set_pmb_entry()
338 struct pmb_entry *pmbp, *pmbe; in pmb_bolt_mapping() local
363 pmbe = pmb_alloc(vaddr, phys, pmb_flags | in pmb_bolt_mapping()
365 if (IS_ERR(pmbe)) { in pmb_bolt_mapping()
367 return PTR_ERR(pmbe); in pmb_bolt_mapping()
370 raw_spin_lock_irqsave(&pmbe->lock, flags); in pmb_bolt_mapping()
372 pmbe->size = pmb_sizes[i].size; in pmb_bolt_mapping()
374 __set_pmb_entry(pmbe); in pmb_bolt_mapping()
376 phys += pmbe->size; in pmb_bolt_mapping()
377 vaddr += pmbe->size; in pmb_bolt_mapping()
378 size -= pmbe->size; in pmb_bolt_mapping()
387 pmbp->link = pmbe; in pmb_bolt_mapping()
391 pmbp = pmbe; in pmb_bolt_mapping()
401 raw_spin_unlock_irqrestore(&pmbe->lock, flags); in pmb_bolt_mapping()
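
pmb_bolt_mapping() covers an arbitrarily sized region greedily: pick the largest pmb_sizes[] chunk that fits and stays aligned, allocate and program an entry under its lock, advance phys and vaddr while shrinking size (lines 376-378), and chain each new entry onto its predecessor via pmbp->link (line 387). A runnable model of the size-selection loop; the kernel's pmb_sizes[] pairs each size with its flag bits, the model keeps just the sizes:

#include <stdio.h>

#define SZ_16M	(16UL << 20)
#define SZ_64M	(64UL << 20)
#define SZ_128M	(128UL << 20)
#define SZ_512M	(512UL << 20)

static const unsigned long pmb_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

static void bolt_mapping(unsigned long vaddr, unsigned long phys,
			 unsigned long size)
{
	while (size >= SZ_16M) {
		int i;

		/* largest chunk that fits and keeps both sides aligned */
		for (i = 0; i < 4; i++)
			if (pmb_sizes[i] <= size &&
			    !(vaddr & (pmb_sizes[i] - 1)) &&
			    !(phys & (pmb_sizes[i] - 1)))
				break;
		if (i == 4)
			break;	/* cannot align even a 16MB entry */

		printf("entry: 0x%08lx -> 0x%08lx, %luMB\n",
		       vaddr, phys, pmb_sizes[i] >> 20);

		phys  += pmb_sizes[i];	/* line 376 */
		vaddr += pmb_sizes[i];	/* line 377 */
		size  -= pmb_sizes[i];	/* line 378 */
	}
}

int main(void)
{
	/* 192MB splits into one 128MB and one 64MB entry */
	bolt_mapping(0x80000000UL, 0x40000000UL, 192UL << 20);
	return 0;
}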
463 struct pmb_entry *pmbe = NULL; in pmb_unmap() local
471 pmbe = &pmb_entry_list[i]; in pmb_unmap()
472 if (pmbe->vpn == vaddr) { in pmb_unmap()
482 pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); in pmb_unmap()
489 static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) in __pmb_unmap_entry() argument
492 struct pmb_entry *pmblink = pmbe; in __pmb_unmap_entry()
504 __clear_pmb_entry(pmbe); in __pmb_unmap_entry()
506 flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size); in __pmb_unmap_entry()
508 pmbe = pmblink->link; in __pmb_unmap_entry()
511 } while (pmbe && --depth); in __pmb_unmap_entry()
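
__pmb_unmap_entry() tears a mapping down entry by entry: clear the hardware slot, flush the cache over the entry's virtual span (line 506), step to ->link, and free the slot just left, with the depth counter bounding the walk so a corrupted chain cannot loop forever. A minimal model of that bounded chain walk, with the hardware and cache operations stubbed out:

#include <stdio.h>

struct pmb_entry {
	unsigned long vpn, size;
	int entry;
	struct pmb_entry *link;
};

static void clear_hw_slot(struct pmb_entry *pmbe)	/* __clear_pmb_entry() */
{
	printf("clear slot %d\n", pmbe->entry);
	/* the kernel also does:
	 * flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size); */
}

static void free_slot(struct pmb_entry *pmbe)		/* pmb_free() */
{
	pmbe->link = 0;
}

static void unmap_chain(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		clear_hw_slot(pmbe);
		pmbe = pmblink->link;	/* line 508: advance before freeing */
		free_slot(pmblink);
	} while (pmbe && --depth);
}

int main(void)
{
	struct pmb_entry b = { .vpn = 0x84000000, .size = 64UL << 20,
			       .entry = 1 };
	struct pmb_entry a = { .vpn = 0x80000000, .size = 64UL << 20,
			       .entry = 0, .link = &b };

	unmap_chain(&a, 16);	/* NR_PMB_ENTRIES-style depth bound */
	return 0;
}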
514 static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) in pmb_unmap_entry() argument
518 if (unlikely(!pmbe)) in pmb_unmap_entry()
522 __pmb_unmap_entry(pmbe, depth); in pmb_unmap_entry()
535 struct pmb_entry *pmbe; in pmb_notify() local
540 pmbe = &pmb_entry_list[i]; in pmb_notify()
543 pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, in pmb_notify()
544 pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); in pmb_notify()
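
pmb_notify() logs each resident entry, and lines 543-544 show exactly which fields it prints: VPN and PPN shifted down by PAGE_SHIFT, the span in megabytes (>> 20), and a cached/uncached tag derived from PMB_C. A userspace rendering of the same fields; the exact pr_info() format string is paraphrased and the PMB_C value is an illustrative placeholder:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMB_C		0x00000008UL	/* cacheable bit (illustrative) */

static void notify_one(unsigned long vpn, unsigned long ppn,
		       unsigned long size, unsigned long flags)
{
	printf("0x%08lx -> 0x%08lx [ %4luMB %scached ]\n",
	       vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT,
	       size >> 20, (flags & PMB_C) ? "" : "un");
}

int main(void)
{
	notify_one(0x80000000UL, 0x40000000UL, 64UL << 20, PMB_C);
	return 0;
}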
582 struct pmb_entry *pmbe; in pmb_synchronize() local
624 pmbe = pmb_alloc(vpn, ppn, flags, i); in pmb_synchronize()
625 if (IS_ERR(pmbe)) { in pmb_synchronize()
630 raw_spin_lock_irqsave(&pmbe->lock, irqflags); in pmb_synchronize()
634 pmbe->size = pmb_sizes[j].size; in pmb_synchronize()
644 if (pmb_can_merge(pmbp, pmbe)) in pmb_synchronize()
645 pmbp->link = pmbe; in pmb_synchronize()
649 pmbp = pmbe; in pmb_synchronize()
651 raw_spin_unlock_irqrestore(&pmbe->lock, irqflags); in pmb_synchronize()
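
pmb_synchronize() walks the hardware array at boot, re-registers every valid entry it finds via pmb_alloc(vpn, ppn, flags, i), note the fixed-slot fourth argument, and chains each entry onto the previous one when pmb_can_merge() allows it. The merge test implied by the surrounding fields is: same attribute flags, and the new entry starts exactly where the previous one ends, both virtually and physically. A plausible reconstruction of that predicate (the real helper lives in the same file but is not among these matches):

#include <stdbool.h>

struct pmb_entry {
	unsigned long vpn, ppn, flags, size;
	struct pmb_entry *link;
};

/* 'a' is the earlier entry (pmbp), 'b' the newly registered one (pmbe). */
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return b->vpn == a->vpn + a->size &&
	       b->ppn == a->ppn + a->size &&
	       b->flags == a->flags;
}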
703 struct pmb_entry *pmbe; in pmb_coalesce() local
708 pmbe = &pmb_entry_list[i]; in pmb_coalesce()
713 if (!pmbe->link) in pmb_coalesce()
720 if (pmbe->size == SZ_512M) in pmb_coalesce()
723 pmb_merge(pmbe); in pmb_coalesce()
744 struct pmb_entry *pmbe; in pmb_resize() local
750 pmbe = &pmb_entry_list[i]; in pmb_resize()
752 if (pmbe->vpn != uncached_start) in pmb_resize()
758 raw_spin_lock_irqsave(&pmbe->lock, flags); in pmb_resize()
760 pmbe->size = SZ_16M; in pmb_resize()
761 pmbe->flags &= ~PMB_SZ_MASK; in pmb_resize()
762 pmbe->flags |= pmb_size_to_flags(pmbe->size); in pmb_resize()
764 uncached_resize(pmbe->size); in pmb_resize()
766 __set_pmb_entry(pmbe); in pmb_resize()
768 raw_spin_unlock_irqrestore(&pmbe->lock, flags); in pmb_resize()
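
pmb_resize() shrinks the boot-time uncached mapping to 16MB: find the entry whose vpn matches uncached_start, set ->size to SZ_16M, swap the size bits inside ->flags via pmb_size_to_flags() (lines 760-762), resize the uncached region to match, and reprogram the slot. A sketch of the size-to-flags translation; the PMB_SZ_* encodings below are illustrative placeholders, not documented SH-4A bits:

#include <stdio.h>

#define SZ_16M	(16UL << 20)
#define SZ_64M	(64UL << 20)
#define SZ_128M	(128UL << 20)
#define SZ_512M	(512UL << 20)

/* Illustrative size-field encodings; see the SH-4A headers for real ones. */
#define PMB_SZ_16M	0x00000000UL
#define PMB_SZ_64M	0x00000010UL
#define PMB_SZ_128M	0x00000080UL
#define PMB_SZ_512M	0x00000090UL
#define PMB_SZ_MASK	PMB_SZ_512M

static unsigned long pmb_size_to_flags(unsigned long size)
{
	switch (size) {
	case SZ_512M:	return PMB_SZ_512M;
	case SZ_128M:	return PMB_SZ_128M;
	case SZ_64M:	return PMB_SZ_64M;
	default:	return PMB_SZ_16M;
	}
}

/* The update at lines 761-762: clear old size bits, or in the new ones. */
static void resize_flags(unsigned long *flags, unsigned long new_size)
{
	*flags &= ~PMB_SZ_MASK;
	*flags |= pmb_size_to_flags(new_size);
}

int main(void)
{
	unsigned long flags = PMB_SZ_512M;	/* e.g. the boot uncached entry */

	resize_flags(&flags, SZ_16M);
	printf("new flags: 0x%08lx\n", flags);	/* size bits now PMB_SZ_16M */
	return 0;
}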
862 struct pmb_entry *pmbe; in pmb_syscore_resume() local
869 pmbe = &pmb_entry_list[i]; in pmb_syscore_resume()
870 set_pmb_entry(pmbe); in pmb_syscore_resume()
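
Finally, pmb_syscore_resume() re-establishes every registered entry after a suspend cycle: each slot still marked in pmb_map is reprogrammed through the locked set_pmb_entry() wrapper. A compilable model of that restore loop, with the hardware write replaced by a trace:

#include <stdio.h>

#define NR_PMB_ENTRIES	16	/* illustrative */

struct pmb_entry { int entry; };

static unsigned long pmb_map;
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];

static void set_pmb_entry_model(struct pmb_entry *pmbe)
{
	/* in the kernel: take pmbe->lock, __set_pmb_entry(), release */
	printf("restored slot %d\n", pmbe->entry);
}

static void syscore_resume(void)
{
	int i;

	for (i = 0; i < NR_PMB_ENTRIES; i++)
		if (pmb_map & (1UL << i))
			set_pmb_entry_model(&pmb_entry_list[i]);
}

int main(void)
{
	pmb_entry_list[3].entry = 3;
	pmb_entry_list[7].entry = 7;
	pmb_map = (1UL << 3) | (1UL << 7);
	syscore_resume();	/* prints slots 3 and 7 */
	return 0;
}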