// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * GRU KERNEL MCS INSTRUCTIONS
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

/* 10 sec */
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT	(((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c)		((c) * 1000000000 / local_cpu_data->itc_freq)
#else
#include <linux/sync_core.h>
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT	((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c)		((c) * 1000000 / tsc_khz)
#endif

/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)

struct mcs_op_statistic mcs_op_statistics[mcsop_last];

static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}

static void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD/STATUS bits must be last */
	*w0 = *w0 | 0x20001;
	gru_flush_cache(h);
}

static void report_instruction_timeout(void *h)
{
	unsigned long goff = GSEGPOFF((unsigned long)h);
	char *id = "???";

	if (TYPE_IS(CCH, goff))
		id = "CCH";
	else if (TYPE_IS(TGH, goff))
		id = "TGH";
	else if (TYPE_IS(TFH, goff))
		id = "TFH";

	panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
}

static int wait_instruction_complete(void *h, enum mcs_op opc)
{
	int status;
	unsigned long start_time = get_cycles();

	while (1) {
		cpu_relax();
		status = GET_MSEG_HANDLE_STATUS(h);
		if (status != CCHSTATUS_ACTIVE)
			break;
		if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
			report_instruction_timeout(h);
			start_time = get_cycles();
		}
	}
	if (gru_options & OPT_STATS)
		update_mcs_stats(opc, get_cycles() - start_time);
	return status;
}

int cch_allocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_allocate);

	/*
	 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
	 * The GSEG memory does not exist until the ALLOCATE completes.
	 */
	sync_core();
	return ret;
}

int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_start);
}

int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt);
}

int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_deallocate);

	/*
	 * Stop speculation into the GSEG being unmapped by the previous
	 * DEALLOCATE.
	 */
	sync_core();
	return ret;
}

int cch_interrupt_sync(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt_sync);
}

int tgh_invalidate(struct gru_tlb_global_handle *tgh,
		   unsigned long vaddr, unsigned long vaddrmask,
		   int asid, int pagesize, int global, int n,
		   unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh, tghop_invalidate);
}

int tfh_write_only(struct gru_tlb_fault_handle *tfh,
		   unsigned long paddr, int gaa,
		   unsigned long vaddr, int asid, int dirty,
		   int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
	return wait_instruction_complete(tfh, tfhop_write_only);
}

void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
		       unsigned long paddr, int gaa,
		       unsigned long vaddr, int asid, int dirty,
		       int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}

void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}

void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}
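
#if 0
/*
 * Illustrative sketch only, not part of the driver: the synchronous CCH
 * operations above are normally used in an allocate/start ...
 * interrupt/deallocate sequence, with the real call sites living elsewhere
 * in the GRU driver.  The function below is hypothetical and compiled out;
 * a nonzero return simply propagates the handle status reported by
 * wait_instruction_complete().
 */
static int gru_cch_lifecycle_sketch(struct gru_context_configuration_handle *cch)
{
	int ret;

	ret = cch_allocate(cch);	/* reserve GSEG resources for the context */
	if (ret)
		return ret;

	ret = cch_start(cch);		/* make the context runnable */
	if (ret) {
		cch_deallocate(cch);
		return ret;
	}

	/* ... context runs; TLB faults are serviced via the TFH/TGH ops above ... */

	cch_interrupt_sync(cch);	/* stop the context and wait for completion */
	return cch_deallocate(cch);
}
#endif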