/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/security.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */

cpumask_t mt_fpu_cpumask;

#ifdef CONFIG_MIPS_MT_FPAFF

#include <linux/cpu.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

unsigned long mt_fpemul_threshold = 0;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched.c changes.
 */

/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_pid(pid) : current;
}

/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		unlock_cpu_hotplug();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held. We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval) {
		/* Drop tasklist_lock before bailing out, as above */
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	if ((p->thread.mflags & MF_FPUBOUND)
	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed(p, effective_mask);
	} else {
		p->thread.mflags &= ~MF_FPUBOUND;
		retval = set_cpus_allowed(p, new_mask);
	}

out_unlock:
	put_task_struct(p);
	unlock_cpu_hotplug();
	return retval;
}
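
/*
 * Illustrative sketch (not part of this file): user space reaches the
 * routine above through the ordinary affinity system call, roughly as
 * follows (with _GNU_SOURCE and <sched.h>):
 *
 *	cpu_set_t set;
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(pid, sizeof(set), &set) < 0)
 *		perror("sched_setaffinity");
 *
 * The full user-requested mask is preserved in thread.user_cpus_allowed,
 * regardless of the mask actually installed.
 */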

/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t mask;
	int retval;
	struct task_struct *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;
	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);

out_unlock:
	read_unlock(&tasklist_lock);
	unlock_cpu_hotplug();
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}

#endif /* CONFIG_MIPS_MT_FPAFF */
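
/*
 * Illustrative sketch only: the FPU emulation path elsewhere in the kernel
 * is expected to bind a heavily-emulating task to the FPU-capable CPUs in a
 * manner roughly equivalent to the following (the emulation counter shown
 * is hypothetical):
 *
 *	if (++emulated_fp_count > mt_fpemul_threshold) {
 *		current->thread.mflags |= MF_FPUBOUND;
 *		set_cpus_allowed(current, mt_fpu_cpumask);
 *	}
 *
 * mipsmt_sys_sched_setaffinity() above then keeps any new user-requested
 * mask intersected with mt_fpu_cpumask for such tasks.
 */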

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk(" MVPControl Passed: %08lx\n", mvpctl);
	printk(" MVPControl Read: %08lx\n", vpflags);
	printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk(" VPE %d\n", i);
				printk(" VPEControl : %08lx\n",
				       read_vpe_c0_vpecontrol());
				printk(" VPEConf0 : %08lx\n",
				       read_vpe_c0_vpeconf0());
				printk(" VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
				printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
				printk(" VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself? */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk(" TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk(" TC %d\n", tc);
		}
		printk(" TCStatus : %08lx\n", tcstatval);
		printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
		printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
		printk(" TCHalt : %08lx\n", haltval);
		printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}

static int mt_opt_norps = 0;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7 = 0;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush = 0;
int mt_protdflush = 0;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);

#ifdef CONFIG_MIPS_MT_FPAFF
static int fpaff_threshold = -1;

static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;
}

__setup("fpaff=", fpaff_thresh);
#endif /* CONFIG_MIPS_MT_FPAFF */

static unsigned int itc_base = 0;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);
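
/*
 * Example (values are illustrative only): the options declared above are
 * consumed from the kernel command line, e.g.
 *
 *	rpsctl=0 nblsu=1 protiflush niflush=2 fpaff=200 itcbase=0x1bdc0000
 *
 * "norps" remains accepted but is deprecated in favour of "rpsctl=".
 */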

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
			mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile__("sync");
		write_c0_config7(nconfig7);
		ehb();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* FPU Use Factor empirically derived from experiments on 34K */
#define FPUSEFACTOR 333

	if (fpaff_threshold >= 0) {
		mt_fpemul_threshold = fpaff_threshold;
	} else {
		mt_fpemul_threshold =
			(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
	}
	printk("FPU Affinity set after %ld emulations\n",
		mt_fpemul_threshold);
#endif /* CONFIG_MIPS_MT_FPAFF */
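
	/*
	 * Worked example of the default above (numbers are illustrative
	 * only): with HZ = 100 and loops_per_jiffy = 1495040 (roughly
	 * 299 BogoMIPS, since BogoMIPS = loops_per_jiffy / (500000 / HZ)),
	 * the integer arithmetic gives
	 *
	 *	(333 * (1495040 / 5000)) / 100 = (333 * 299) / 100 = 995
	 *
	 * i.e. FPU affinity kicks in after about a thousand emulated FP
	 * operations on such a system.
	 */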

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping. This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}

/*
 * How cache flushes are protected from concurrent execution depends on
 * the MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

struct class *mt_class;

static int __init mt_init(void)
{
	struct class *mtc;

	mtc = class_create(THIS_MODULE, "mt");
	if (IS_ERR(mtc))
		return PTR_ERR(mtc);

	mt_class = mtc;

	return 0;
}

subsys_initcall(mt_init);
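
/*
 * Illustrative sketch only: mt_class gives MT-related drivers (the VPE/RP
 * loaders, for instance) a common "mt" sysfs class to hang device nodes
 * off.  With the class API of this kernel generation that would look
 * roughly like:
 *
 *	struct class_device *cd;
 *
 *	cd = class_device_create(mt_class, NULL, MKDEV(major, minor),
 *				 NULL, "vpe%d", 1);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *
 * where "major"/"minor" and the "vpe%d" name are placeholders, not part
 * of this file.
 */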