slb.c (651a88798412e216f337d70181127e847f00a4b7 → 1fd02f6605b855b4af2883f29a2abc88bdf17857)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * PowerPC64 SLB support.
4 *
5 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
6 * Based on earlier code written by:
7 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
8 * Copyright (c) 2001 Dave Engebretsen

--- 333 unchanged lines hidden ---

342 if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
343 return;
344
345 hard_irq_disable();
346
347 /*
348 * We have no good place to clear the slb preload cache on exec,
349 * flush_thread is about the earliest arch hook but that happens
-350 * after we switch to the mm and have aleady preloaded the SLBEs.
+350 * after we switch to the mm and have already preloaded the SLBEs.
351 *
352 * For the most part that's probably okay to use entries from the
353 * previous exec, they will age out if unused. It may turn out to
354 * be an advantage to clear the cache before switching to it,
355 * however.
356 */
357
358 /*

--- 251 unchanged lines hidden ---
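The guard at line 342 above only queues a new pair of preload entries when the fixed-size cache has room for both of them; otherwise it simply returns, presumably leaving the segments to be installed later through the normal SLB miss path. A minimal standalone sketch of that room-for-two pattern, using hypothetical names rather than the kernel's thread_info fields:

#include <stdbool.h>

/* Illustrative only: PRELOAD_NR, struct preload_cache and preload_pair()
 * are made-up names; the real state lives in thread_info and is sized by
 * SLB_PRELOAD_NR. */
#define PRELOAD_NR 16

struct preload_cache {
	unsigned int nr;                /* entries currently queued     */
	unsigned long esid[PRELOAD_NR]; /* segment IDs to preload later */
};

static bool preload_pair(struct preload_cache *pc,
			 unsigned long esid_a, unsigned long esid_b)
{
	/* Bail out unless both entries fit, mirroring the check above;
	 * skipped segments are simply resolved on demand instead. */
	if (pc->nr + 2 > PRELOAD_NR)
		return false;

	pc->esid[pc->nr++] = esid_a;
	pc->esid[pc->nr++] = esid_b;
	return true;
}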

610 * We have space in slb cache for optimized switch_slb().
611 * Top 36 bits from esid_data as per ISA
612 */
613 local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
614 local_paca->slb_cache_ptr++;
615 } else {
616 /*
617 * Our cache is full and the current cache content strictly
-618 * doesn't indicate the active SLB conents. Bump the ptr
+618 * doesn't indicate the active SLB contents. Bump the ptr
619 * so that switch_slb() will ignore the cache.
620 */
621 local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
622 }
623 }
624
625 static enum slb_index alloc_slb_index(bool kernel)
626 {

--- 243 unchanged lines hidden ---
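The hunk around lines 610-622 is a bounded-cache-with-sentinel pattern: while there is room, each inserted ESID (the top bits of esid_data, per the comment) is recorded so that switch_slb() can later invalidate only those entries; once the cache can no longer describe every live SLB entry, slb_cache_ptr is pushed past SLB_CACHE_ENTRIES so switch_slb() ignores the cache and falls back to flushing everything. A self-contained sketch of that pattern, with illustrative names and sizes rather than the real paca fields:

#include <stdbool.h>

#define CACHE_ENTRIES 8            /* illustrative size, not SLB_CACHE_ENTRIES */

struct esid_cache {
	unsigned long entry[CACHE_ENTRIES]; /* ESIDs recorded since the last switch */
	unsigned int ptr;                   /* valid count, or CACHE_ENTRIES + 1    */
};

/* Producer: record an inserted ESID, or mark the cache unusable once it
 * can no longer cover the full set of live entries. */
static void esid_cache_record(struct esid_cache *c, unsigned long esid)
{
	if (c->ptr < CACHE_ENTRIES)
		c->entry[c->ptr++] = esid;
	else
		c->ptr = CACHE_ENTRIES + 1; /* sentinel: consumer must flush everything */
}

/* Consumer: take the optimized path only while the cache still covers
 * every entry that would need invalidating. */
static bool esid_cache_usable(const struct esid_cache *c)
{
	return c->ptr <= CACHE_ENTRIES;
}

The sentinel keeps the consumer's decision to a single comparison: any value above the cache size means "don't trust the cache, flush the lot", which matches the "Bump the ptr so that switch_slb() will ignore the cache" comment in the source.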