/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

/*
 * Return true if the segment table entry already maps the same segment as
 * @slb.  The SSTE is stored big-endian (hardware layout), so the SLB values
 * are converted before comparison.
 */
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
	        (sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 *
 * Must be called with ctx->sste_lock held (see cxl_load_segment), since it
 * reads the table and may advance the per-group LRU counter.
 */
static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

	/* Hash on the ESID; the shift depends on the segment size (1T vs 256M) */
	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

	/* Each hash bucket is a group of 8 consecutive SSTEs */
	primary = ctx->sstp + (hash << 3);

	/*
	 * Single pass over the group: remember the first invalid (free) entry
	 * while also checking whether the segment is already present.
	 */
	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}

/*
 * Insert the segment described by @slb into the context's segment table,
 * unless it is already present.  All table updates happen under
 * ctx->sste_lock.
 */
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	/* mask is the group index, we search primary and secondary here. */
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;	/* already mapped - nothing to do */

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
			sste - ctx->sstp, slb->vsid, slb->esid);
	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

/*
 * Resolve a segment fault for effective address @ea: compute the SLB entry
 * for @mm and load it into the context's segment table.  Returns 0 on
 * success or the error from copro_calculate_slb() (e.g. no translation).
 */
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0,0};
	int rc;

	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
		cxl_load_segment(ctx, &slb);
	}

	return rc;
}

/*
 * Acknowledge the fault to the PSL with an address error (AE), then record
 * the faulting address/DSISR on the context and wake anyone waiting on it
 * so userspace can learn about the failed translation.
 */
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}
/*
 * Handle a segment (SLB) miss reported by the PSL: populate the segment
 * table for @ea and ack the fault, or ack with an address error if the SLB
 * could not be computed.  Always returns IRQ_HANDLED.
 */
static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
	trace_cxl_ste_miss(ctx, ea);

	if ((rc = cxl_fault_segment(ctx, mm, ea)))
		cxl_ack_ae(ctx);
	else {

		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle a page (hash PTE) miss reported by the PSL: fault the page in via
 * copro_handle_mm_fault(), preload the hash table entry, and restart the
 * translation.  On failure the fault is acked with an address error.
 */
static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm, u64 dsisr, u64 dar)
{
	unsigned flt = 0;
	int result;
	unsigned long access, flags, inv_flags = 0;

	trace_cxl_pte_miss(ctx, dsisr, dar);

	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return cxl_ack_ae(ctx);
	}

	/*
	 * update_mmu_cache() will not have loaded the hash since current->trap
	 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
	 */
	access = _PAGE_PRESENT;
	/* PSL reported a store fault -> need write access */
	if (dsisr & CXL_PSL_DSISR_An_S)
		access |= _PAGE_RW;
	/* User access for user contexts, or kernel contexts touching user VA */
	if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
		access |= _PAGE_USER;

	if (dsisr & DSISR_NOHPTE)
		inv_flags |= HPTE_NOHPTE_UPDATE;

	/* hash_page_mm() must run with interrupts disabled */
	local_irq_save(flags);
	hash_page_mm(mm, dar, access, 0x300, inv_flags);
	local_irq_restore(flags);

	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

/*
 * Returns the mm_struct corresponding to the context ctx via ctx->pid
 * In case the task has exited we use the task group leader accessible
 * via ctx->glpid to find the next task in the thread group that has a
 * valid mm_struct associated with it. If a task with valid mm_struct
 * is found the ctx->pid is updated to use the task struct for subsequent
 * translations. In case no valid mm_struct is found in the task group to
 * service the fault a NULL is returned.
 *
 * On success the caller owns a reference on the returned mm (get_task_mm)
 * and must drop it with mmput().
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
	struct task_struct *task = NULL;
	struct mm_struct *mm = NULL;
	struct pid *old_pid = ctx->pid;

	if (old_pid == NULL) {
		pr_warn("%s: Invalid context for pe=%d\n",
			__func__, ctx->pe);
		return NULL;
	}

	task = get_pid_task(old_pid, PIDTYPE_PID);

	/*
	 * pid_alive may look racy but this saves us from costly
	 * get_task_mm when the task is a zombie. In worst case
	 * we may think a task is alive, which is about to die
	 * but get_task_mm will return NULL.
	 */
	if (task != NULL && pid_alive(task))
		mm = get_task_mm(task);

	/* release the task struct that was taken earlier */
	if (task)
		put_task_struct(task);
	else
		pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
			 __func__, pid_nr(old_pid), ctx->pe);

	/*
	 * If we couldn't find the mm context then use the group
	 * leader to iterate over the task group and find a task
	 * that gives us mm_struct.
	 */
	if (unlikely(mm == NULL && ctx->glpid != NULL)) {

		/* RCU protects the thread-group walk below */
		rcu_read_lock();
		task = pid_task(ctx->glpid, PIDTYPE_PID);
		if (task)
			do {
				mm = get_task_mm(task);
				if (mm) {
					/* remember this task for future faults */
					ctx->pid = get_task_pid(task,
								PIDTYPE_PID);
					break;
				}
				task = next_thread(task);
			} while (task && !thread_group_leader(task));
		rcu_read_unlock();

		/* check if we switched pid */
		if (ctx->pid != old_pid) {
			if (mm)
				pr_devel("%s:pe=%i switch pid %i->%i\n",
					 __func__, ctx->pe, pid_nr(old_pid),
					 pid_nr(ctx->pid));
			else
				pr_devel("%s:Cannot find mm for pid=%i\n",
					 __func__, pid_nr(old_pid));

			/* drop the reference to older pid */
			put_pid(old_pid);
		}
	}

	return mm;
}



/*
 * Bottom half of the cxl translation fault interrupt: runs from the
 * context's fault_work workqueue item.  Dispatches to the segment-miss or
 * page-fault handler based on the saved DSISR, using the mm looked up from
 * the context's pid (NULL for kernel contexts).
 */
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct mm_struct *mm = NULL;

	/*
	 * Sanity check (bare-metal only): the fault registers should still
	 * hold the values the top half saved for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
			/* Most likely explanation is harmless - a dedicated
			 * process has detached and these were cleared by the
			 * PSL purge, but warn about it just in case
			 */
			dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
			return;
		}
	}

	/* Early return if the context is being / has been detached */
	if (ctx->status == CLOSED) {
		cxl_ack_ae(ctx);
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!ctx->kernel) {

		mm = get_mem_context(ctx);
		/* indicates all the thread in task group have exited */
		if (mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			cxl_ack_ae(ctx);
			return;
		} else {
			pr_devel("Handling page fault for pe=%d pid=%i\n",
				 ctx->pe, pid_nr(ctx->pid));
		}
	}

	if (dsisr & CXL_PSL_DSISR_An_DS)
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (dsisr & CXL_PSL_DSISR_An_DM)
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	/* drop the reference taken by get_mem_context() */
	if (mm)
		mmput(mm);
}

/*
 * Pre-load the segment table entry covering effective address @ea so the
 * AFU does not take a segment miss on its first access there.
 */
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_one unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	cxl_fault_segment(ctx, mm, ea);

	mmput(mm);
}

/*
 * Return the first effective address of the segment following the one that
 * contains @ea.  Segment size is 1T if SLB_VSID_B_1T is set in @vsid,
 * otherwise 256M.
 */
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}

/*
 * Walk all VMAs of the context's mm and pre-load a segment table entry for
 * every segment they touch, stepping by segment size and skipping segments
 * already handled (same ESID as the previous iteration).
 */
static void cxl_prefault_vma(struct cxl_context *ctx)
{
	u64 ea, last_esid = 0;
	struct copro_slb slb;
	struct vm_area_struct *vma;
	int rc;
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_vm unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		for (ea = vma->vm_start; ea < vma->vm_end;
				ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	up_read(&mm->mmap_sem);

	mmput(mm);
}

/*
 * Pre-fault segments for a context according to the AFU's configured
 * prefault_mode: just the segment containing the WED, all VMAs, or nothing.
 */
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_prefault_one(ctx, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx);
		break;
	default:
		break;
	}
}