// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS               0
#define VTOP_INVALID               -1
#define VTOP_RETRY                 -2

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
        return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_lock.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
        struct vm_area_struct *vma;

        vma = vma_lookup(current->mm, vaddr);
        if (vma && vma->vm_ops == &gru_vm_ops)
                return vma;
        return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_lock locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */

static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;

        mmap_read_lock(mm);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (gts)
                mutex_lock(&gts->ts_ctxlock);
        else
                mmap_read_unlock(mm);
        return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = ERR_PTR(-EINVAL);

        mmap_write_lock(mm);
        vma = gru_find_vma(vaddr);
        if (!vma)
                goto err;

        gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
        if (IS_ERR(gts))
                goto err;
        mutex_lock(&gts->ts_ctxlock);
        mmap_write_downgrade(mm);
        return gts;

err:
        mmap_write_unlock(mm);
        return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
        mutex_unlock(&gts->ts_ctxlock);
        mmap_read_unlock(current->mm);
}

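/*
 * Illustrative locking pattern (a sketch, not an additional call site in
 * this file): gru_find_lock_gts() and gru_unlock_gts() bracket operations
 * on a GSEG, as the ioctl handlers below do:
 *
 *	gts = gru_find_lock_gts(vaddr);
 *	if (!gts)
 *		return -EINVAL;
 *	...operate on the context with mmap_lock + ts_ctxlock held...
 *	gru_unlock_gts(gts);
 */
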
/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
        if (cbk) {
                cbk->istatus = CBS_ACTIVE;
        }
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
                                struct gru_tlb_fault_map *imap,
                                struct gru_tlb_fault_map *dmap)
{
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;

        tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
        prefetchw(tfm);		/* Helps on hardware, required for emulator */
        for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
                imap->fault_bits[i] = k;
                k = tfm->done_bits[i];
                if (k)
                        k = xchg(&tfm->done_bits[i], 0UL);
                dmap->fault_bits[i] = k;
        }

        /*
         * Not functionally required but helps performance. (Required
         * on emulator)
         */
        gru_flush_cache(tfm);
}

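/*
 * Sketch of how the returned maps are consumed (see gru_intr() below):
 * each set bit identifies a CBR with a pending TLB fault (imap) or a
 * completed instruction (dmap):
 *
 *	get_clear_fault_map(gru, &imap, &dmap);
 *	for_each_cbr_in_tfm(cbrnum, imap.fault_bits)
 *		...handle the TLB miss for this CBR...
 */
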
/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
                                 unsigned long vaddr, int write,
                                 unsigned long *paddr, int *pageshift)
{
        struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
        return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_lock is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        int write, unsigned long *paddr, int *pageshift)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t pte;

        pgdp = pgd_offset(vma->vm_mm, vaddr);
        if (unlikely(pgd_none(*pgdp)))
                goto err;

        p4dp = p4d_offset(pgdp, vaddr);
        if (unlikely(p4d_none(*p4dp)))
                goto err;

        pudp = pud_offset(p4dp, vaddr);
        if (unlikely(pud_none(*pudp)))
                goto err;

        pmdp = pmd_offset(pudp, vaddr);
        if (unlikely(pmd_none(*pmdp)))
                goto err;
#ifdef CONFIG_X86_64
        if (unlikely(pmd_large(*pmdp)))
                pte = ptep_get((pte_t *)pmdp);
        else
#endif
                pte = *pte_offset_kernel(pmdp, vaddr);

        if (unlikely(!pte_present(pte) ||
                     (write && (!pte_write(pte) || !pte_dirty(pte)))))
                return 1;

        *paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        return 0;

err:
        return 1;
}

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
                    int write, int atomic, unsigned long *gpa, int *pageshift)
{
        struct mm_struct *mm = gts->ts_mm;
        struct vm_area_struct *vma;
        unsigned long paddr;
        int ret, ps;

        vma = find_vma(mm, vaddr);
        if (!vma)
                goto inval;

        /*
         * Atomic lookup is faster & usually works even if called in non-atomic
         * context.
         */
        rmb();	/* Must check ms_range_active before loading PTEs */
        ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
        if (ret) {
                if (atomic)
                        goto upm;
                if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
                        goto inval;
        }
        if (is_gru_paddr(paddr))
                goto inval;
        paddr = paddr & ~((1UL << ps) - 1);
        *gpa = uv_soc_phys_ram_to_gpa(paddr);
        *pageshift = ps;
        return VTOP_SUCCESS;

inval:
        return VTOP_INVALID;
upm:
        return VTOP_RETRY;
}

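/*
 * How the VTOP_* results steer the fault path (see gru_try_dropin() below):
 * VTOP_SUCCESS leads to a TLB dropin, VTOP_INVALID raises an exception on
 * the CB, and VTOP_RETRY switches the CB to user polling mode so the fault
 * can be retried in non-atomic context.
 */
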
/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
        if (unlikely(cbe)) {
                cbe->cbrexecstatus = 0;         /* make CL dirty */
                gru_flush_cache(cbe);
        }
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
                        struct gru_thread_state *gts, int atomic,
                        unsigned long fault_vaddr, int asid, int write,
                        unsigned char tlb_preload_count,
                        struct gru_tlb_fault_handle *tfh,
                        struct gru_control_block_extended *cbe)
{
        unsigned long vaddr = 0, gpa;
        int ret, pageshift;

        if (cbe->opccpy != OP_BCOPY)
                return;

        if (fault_vaddr == cbe->cbe_baddr0)
                vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
        else if (fault_vaddr == cbe->cbe_baddr1)
                vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

        fault_vaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;
        vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

        while (vaddr > fault_vaddr) {
                ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
                if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
                                          GRU_PAGESIZE(pageshift)))
                        return;
                gru_dbg(grudev,
                        "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
                        atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
                        vaddr, asid, write, pageshift, gpa);
                vaddr -= PAGE_SIZE;
                STAT(tlb_preload_page);
        }
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	Input:
 *		cb    Address of user CBR. Null if not running in user context
 *	Return:
 *		  0 = dropin, exception, or switch to UPM successful
 *		  1 = range invalidate active
 *		< 0 = error code
 *
 */
static int gru_try_dropin(struct gru_state *gru,
                          struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          struct gru_instruction_bits *cbk)
{
        struct gru_control_block_extended *cbe = NULL;
        unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
        int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
        unsigned long gpa = 0, vaddr = 0;

        /*
         * NOTE: The GRU contains magic hardware that eliminates races between
         * TLB invalidates and TLB dropins. If an invalidate occurs
         * in the window between reading the TFH and the subsequent TLB dropin,
         * the dropin is ignored. This eliminates the need for additional locks.
         */

        /*
         * Prefetch the CBE if doing TLB preloading
         */
        if (unlikely(tlb_preload_count)) {
                cbe = gru_tfh_to_cbe(tfh);
                prefetchw(cbe);
        }

        /*
         * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
         * call. Might be a hardware race OR a stupid user. Ignore FMM because
         * FMM is a transient state.
         */
        if (tfh->status != TFHSTATUS_EXCEPTION) {
                gru_flush_cache(tfh);
                sync_core();
                if (tfh->status != TFHSTATUS_EXCEPTION)
                        goto failnoexception;
                STAT(tfh_stale_on_fault);
        }
        if (tfh->state == TFHSTATE_IDLE)
                goto failidle;
        if (tfh->state == TFHSTATE_MISS_FMM && cbk)
                goto failfmm;

        write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
        vaddr = tfh->missvaddr;
        asid = tfh->missasid;
        indexway = tfh->indexway;
        if (asid == 0)
                goto failnoasid;

        rmb();	/* TFH must be cache resident before reading ms_range_active */

        /*
         * TFH is cache resident - at least briefly. Fail the dropin
         * if a range invalidate is active.
         */
        if (atomic_read(&gts->ts_gms->ms_range_active))
                goto failactive;

        ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
        if (ret == VTOP_INVALID)
                goto failinval;
        if (ret == VTOP_RETRY)
                goto failupm;

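        /*
         * First fault for this page size in this context: add the size to
         * the available-size mask. If the CCH cannot be updated now (atomic
         * context or update failure), force a CCH reload and retry via UPM.
         */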
        if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
                gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
                if (atomic || !gru_update_cch(gts)) {
                        gts->ts_force_cch_reload = 1;
                        goto failupm;
                }
        }

        if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
                gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
                gru_flush_cache_cbe(cbe);
        }

        gru_cb_set_istatus_active(cbk);
        gts->ustats.tlbdropin++;
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
        gru_dbg(grudev,
                "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
                " rw %d, ps %d, gpa 0x%lx\n",
                atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
                indexway, write, pageshift, gpa);
        STAT(tlb_dropin);
        return 0;

failnoasid:
        /* No asid (delayed unload). */
        STAT(tlb_dropin_fail_no_asid);
        gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        return -EAGAIN;

failupm:
        /* Atomic failure: switch CBR to UPM */
        tfh_user_polling_mode(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_upm);
        gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return 1;

failfmm:
        /* FMM state on UPM call */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failnoexception:
        /* TFH status did not show exception pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_no_exception);
        gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
                tfh, tfh->status, tfh->state);
        return 0;

failidle:
        /* TFH state was idle - no miss pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_idle);
        gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failinval:
        /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
        tfh_exception(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_invalid);
        gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return -EFAULT;

failactive:
        /* Range invalidate active. Switch to UPM iff atomic */
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_range_active);
        gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
                tfh, vaddr);
        return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * Linux interrupt subsystem.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
        struct gru_state *gru;
        struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        struct completion *cmp;
        int cbrnum, ctxnum;

        STAT(intr);

        gru = &gru_base[blade]->bs_grus[chiplet];
        if (!gru) {
                dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
                        raw_smp_processor_id(), chiplet);
                return IRQ_NONE;
        }
        get_clear_fault_map(gru, &imap, &dmap);
        gru_dbg(grudev,
                "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
                smp_processor_id(), chiplet, gru->gs_gid,
                imap.fault_bits[0], imap.fault_bits[1],
                dmap.fault_bits[0], dmap.fault_bits[1]);

        for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
                STAT(intr_cbr);
                cmp = gru->gs_blade->bs_async_wq;
                if (cmp)
                        complete(cmp);
                gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
                        gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
        }

        for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                STAT(intr_tfh);
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh);	/* Helps on hdw, required for emulator */

                /*
                 * When hardware sets a bit in the faultmap, it implicitly
                 * locks the GRU context so that it cannot be unloaded.
                 * The gts cannot change until a TFH start/writestart command
                 * is issued.
                 */
                ctxnum = tfh->ctxnum;
                gts = gru->gs_gts[ctxnum];

                /* Spurious interrupts can cause this. Ignore. */
                if (!gts) {
                        STAT(intr_spurious);
                        continue;
                }

                /*
                 * This is running in interrupt context. Trylock the mmap_lock.
                 * If it fails, retry the fault in user context.
                 */
                gts->ustats.fmm_tlbmiss++;
                if (!gts->ts_force_cch_reload &&
                                        mmap_read_trylock(gts->ts_mm)) {
                        gru_try_dropin(gru, gts, tfh, NULL);
                        mmap_read_unlock(gts->ts_mm);
                } else {
                        tfh_user_polling_mode(tfh);
                        STAT(intr_mm_lock_failed);
                }
        }
        return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
        return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
        return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
        int blade;

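        /*
         * Service the GRUs on blades that have no CPUs of their own;
         * blades with CPUs take their GRU interrupts locally via
         * gru0_intr()/gru1_intr().
         */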
        for_each_possible_blade(blade) {
                if (uv_blade_nr_possible_cpus(blade))
                        continue;
                gru_intr(0, blade);
                gru_intr(1, blade);
        }
        return IRQ_HANDLED;
}

static int gru_user_dropin(struct gru_thread_state *gts,
                           struct gru_tlb_fault_handle *tfh,
                           void *cb)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        int ret;

        gts->ustats.upm_tlbmiss++;
        while (1) {
                wait_event(gms->ms_wait_queue,
                           atomic_read(&gms->ms_range_active) == 0);
                prefetchw(tfh);	/* Helps on hdw, required for emulator */
                ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
                if (ret <= 0)
                        return ret;
                STAT(call_os_wait_queue);
        }
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
        struct gru_tlb_fault_handle *tfh;
        struct gru_thread_state *gts;
        void *cbk;
        int ucbnum, cbrnum, ret = -EINVAL;

        STAT(call_os);

        /* sanity check the cb pointer */
        ucbnum = get_cb_number((void *)cb);
        if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
                return -EINVAL;

again:
        gts = gru_find_lock_gts(cb);
        if (!gts)
                return -EINVAL;
        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;

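        /*
         * If the context is loaded on the wrong blade/chiplet, unload it;
         * the retry will fault the context back in at an acceptable
         * location.
         */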
        if (gru_check_context_placement(gts)) {
                gru_unlock_gts(gts);
                gru_unload_context(gts, 1);
                goto again;
        }

        /*
         * CCH may contain stale data if ts_force_cch_reload is set.
         */
        if (gts->ts_gru && gts->ts_force_cch_reload) {
                gts->ts_force_cch_reload = 0;
                gru_update_cch(gts);
        }

        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_gru) {
                tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
                cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
                                gts->ts_ctxnum, ucbnum);
                ret = gru_user_dropin(gts, tfh, cbk);
        }
exit:
        gru_unlock_gts(gts);
        return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
        struct control_block_extended_exc_detail excdet;
        struct gru_control_block_extended *cbe;
        struct gru_thread_state *gts;
        int ucbnum, cbrnum, ret;

        STAT(user_exception);
        if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
                return -EFAULT;

        gts = gru_find_lock_gts(excdet.cb);
        if (!gts)
                return -EINVAL;

        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
        ucbnum = get_cb_number((void *)excdet.cb);
        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
                ret = -EINVAL;
        } else if (gts->ts_gru) {
                cbrnum = thread_cbr_number(gts, ucbnum);
                cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
                gru_flush_cache(cbe);	/* CBE not coherent */
                sync_core();		/* make sure we have current data */
                excdet.opc = cbe->opccpy;
                excdet.exopc = cbe->exopccpy;
                excdet.ecause = cbe->ecause;
                excdet.exceptdet0 = cbe->idef1upd;
                excdet.exceptdet1 = cbe->idef3upd;
                excdet.cbrstate = cbe->cbrstate;
                excdet.cbrexecstatus = cbe->cbrexecstatus;
                gru_flush_cache_cbe(cbe);
                ret = 0;
        } else {
                ret = -EAGAIN;
        }
        gru_unlock_gts(gts);

        gru_dbg(grudev,
                "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
                "exdet0 0x%lx, exdet1 0x%x\n",
                excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
                excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
        if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
                ret = -EFAULT;
        return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
        struct gru_thread_state *gts;
        struct gru_state *gru;
        int gid, ctxnum;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        foreach_gid(gid) {
                gru = GID_TO_GRU(gid);
                spin_lock(&gru->gs_lock);
                for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        gts = gru->gs_gts[ctxnum];
                        if (gts && mutex_trylock(&gts->ts_ctxlock)) {
                                spin_unlock(&gru->gs_lock);
                                gru_unload_context(gts, 1);
                                mutex_unlock(&gts->ts_ctxlock);
                                spin_lock(&gru->gs_lock);
                        }
                }
                spin_unlock(&gru->gs_lock);
        }
        return 0;
}

int gru_user_unload_context(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_unload_context_req req;

        STAT(user_unload_context);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

        if (!req.gseg)
                return gru_unload_all_contexts();

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        if (gts->ts_gru)
                gru_unload_context(gts, 1);
        gru_unlock_gts(gts);

        return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_flush_tlb_req req;
        struct gru_mm_struct *gms;

        STAT(user_flush_tlb);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
                req.vaddr, req.len);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        gms = gts->ts_gms;
        gru_unlock_gts(gts);
        gru_flush_tlb_range(gms, req.vaddr, req.len);

        return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_get_gseg_statistics_req req;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        /*
         * The library creates arrays of contexts for threaded programs.
         * If no gts exists in the array, the context has never been used & all
         * statistics are implicitly 0.
         */
        gts = gru_find_lock_gts(req.gseg);
        if (gts) {
                memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
                gru_unlock_gts(gts);
        } else {
                memset(&req.stats, 0, sizeof(gts->ustats));
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;

        return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_set_context_option_req req;
        int ret = 0;

        STAT(set_context_option);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;
        gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts) {
                gts = gru_alloc_locked_gts(req.gseg);
                if (IS_ERR(gts))
                        return PTR_ERR(gts);
        }

        switch (req.op) {
        case sco_blade_chiplet:
                /* Select blade/chiplet for GRU context */
                if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
                    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
                    (req.val1 >= 0 && !gru_base[req.val1])) {
                        ret = -EINVAL;
                } else {
                        gts->ts_user_blade_id = req.val1;
                        gts->ts_user_chiplet_id = req.val0;
                        if (gru_check_context_placement(gts)) {
                                gru_unlock_gts(gts);
                                gru_unload_context(gts, 1);
                                return ret;
                        }
                }
                break;
        case sco_gseg_owner:
                /* Register the current task as the GSEG owner */
                gts->ts_tgid_owner = current->tgid;
                break;
        case sco_cch_req_slice:
                /* Set the CCH slice option */
                gts->ts_cch_req_slice = req.val1 & 3;
                break;
        default:
                ret = -EINVAL;
        }
        gru_unlock_gts(gts);

        return ret;
}