// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *              GRU KERNEL MCS INSTRUCTIONS
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

/* Timeout for GRU instructions: 10 seconds, expressed in cycle-counter units */
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT	(((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c)		((c) * 1000000000 / local_cpu_data->itc_freq)
#else
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT	((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c)		((c) * 1000000 / tsc_khz)
#endif

/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)

struct mcs_op_statistic mcs_op_statistics[mcsop_last];

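/*
 * Convert an elapsed cycle count to nanoseconds and fold it into the
 * per-opcode MCS statistics (count, total time, maximum observed time).
 */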
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}

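/*
 * Kick off an MCS instruction: mark the handle active and set the start
 * bit in word 0, then flush the cache line so the write reaches the GRU.
 */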
static void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD/STATUS bits must be last */
	*w0 = *w0 | 0x20001;
	gru_flush_cache(h);
}

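/*
 * An MCS instruction has not completed within GRU_OPERATION_TIMEOUT.
 * Identify the handle type (CCH, TGH or TFH) from its offset within the
 * GSEG and panic, since a hung handle indicates broken GRU hardware.
 */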
static void report_instruction_timeout(void *h)
{
	unsigned long goff = GSEGPOFF((unsigned long)h);
	char *id = "???";

	if (TYPE_IS(CCH, goff))
		id = "CCH";
	else if (TYPE_IS(TGH, goff))
		id = "TGH";
	else if (TYPE_IS(TFH, goff))
		id = "TFH";

	panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
}

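/*
 * Spin until the handle leaves the ACTIVE state, reporting a timeout
 * (which panics) if the instruction takes too long. Optionally record
 * the elapsed time in the MCS statistics.
 */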
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
	int status;
	unsigned long start_time = get_cycles();

	while (1) {
		cpu_relax();
		status = GET_MSEG_HANDLE_STATUS(h);
		if (status != CCHSTATUS_ACTIVE)
			break;
		if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
			report_instruction_timeout(h);
			start_time = get_cycles();
		}
	}
	if (gru_options & OPT_STATS)
		update_mcs_stats(opc, get_cycles() - start_time);
	return status;
}

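/* Allocate the GRU context described by the CCH and wait for completion. */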
int cch_allocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_allocate);

	/*
	 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
	 * The GSEG memory does not exist until the ALLOCATE completes.
	 */
	sync_core();
	return ret;
}

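/* Start execution of a previously allocated GRU context. */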
int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_start);
}

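/* Issue a CCH INTERRUPT to a running context and wait for the instruction. */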
int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt);
}

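/*
 * Deallocate the GRU context described by the CCH. As in cch_allocate(),
 * speculation into the affected GSEG is fenced off with sync_core().
 */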
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_deallocate);

	/*
	 * Stop speculation into the GSEG being unmapped by the previous
	 * DEALLOCATE.
	 */
	sync_core();
	return ret;
}

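/*
 * Synchronous variant of cch_interrupt(): issue a CCH INTERRUPT_SYNC and
 * wait for the instruction to complete.
 */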
int cch_interrupt_sync(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt_sync);
}

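/*
 * TGH (TLB global handle): invalidate TLB entries matching the given
 * virtual address/mask and ASID in the contexts selected by ctxbitmap,
 * then wait for the invalidate to complete.
 */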
int tgh_invalidate(struct gru_tlb_global_handle *tgh,
				 unsigned long vaddr, unsigned long vaddrmask,
				 int asid, int pagesize, int global, int n,
				 unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh, tghop_invalidate);
}

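/*
 * TFH (TLB fault handle): drop a translation for the faulting address into
 * the TLB. WRITE_ONLY does not restart the faulted GRU instruction; compare
 * tfh_write_restart() below.
 */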
int tfh_write_only(struct gru_tlb_fault_handle *tfh,
				  unsigned long paddr, int gaa,
				  unsigned long vaddr, int asid, int dirty,
				  int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
	return wait_instruction_complete(tfh, tfhop_write_only);
}

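/*
 * Drop in a translation and restart the GRU instruction that faulted.
 * Fire-and-forget: completion of the TFH instruction is not waited for.
 */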
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
				     unsigned long paddr, int gaa,
				     unsigned long vaddr, int asid, int dirty,
				     int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}

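/* Put the faulting context into user polling mode (no wait for completion). */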
void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}

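/* Signal an exception on the faulting context (no wait for completion). */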
void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}