xref: /openbmc/linux/arch/mips/kernel/mips-mt.c (revision f72af3cf06370cedbe387364d447223e5252a000)
141c594abSRalf Baechle /*
241c594abSRalf Baechle  * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
341c594abSRalf Baechle  * Copyright (C) 2005 Mips Technologies, Inc
441c594abSRalf Baechle  */
541c594abSRalf Baechle 
641c594abSRalf Baechle #include <linux/kernel.h>
741c594abSRalf Baechle #include <linux/sched.h>
841c594abSRalf Baechle #include <linux/cpumask.h>
941c594abSRalf Baechle #include <linux/interrupt.h>
10*f72af3cfSYoichi Yuasa #include <linux/security.h>
1141c594abSRalf Baechle 
1241c594abSRalf Baechle #include <asm/cpu.h>
1341c594abSRalf Baechle #include <asm/processor.h>
1441c594abSRalf Baechle #include <asm/atomic.h>
1541c594abSRalf Baechle #include <asm/system.h>
1641c594abSRalf Baechle #include <asm/hardirq.h>
1741c594abSRalf Baechle #include <asm/mmu_context.h>
1841c594abSRalf Baechle #include <asm/smp.h>
1941c594abSRalf Baechle #include <asm/mipsmtregs.h>
2041c594abSRalf Baechle #include <asm/r4kcache.h>
2141c594abSRalf Baechle #include <asm/cacheflush.h>
2241c594abSRalf Baechle 
2341c594abSRalf Baechle /*
2441c594abSRalf Baechle  * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
2541c594abSRalf Baechle  */
2641c594abSRalf Baechle 
2741c594abSRalf Baechle cpumask_t mt_fpu_cpumask;
2841c594abSRalf Baechle 
2941c594abSRalf Baechle #ifdef CONFIG_MIPS_MT_FPAFF
3041c594abSRalf Baechle 
3141c594abSRalf Baechle #include <linux/cpu.h>
3241c594abSRalf Baechle #include <linux/delay.h>
3341c594abSRalf Baechle #include <asm/uaccess.h>
3441c594abSRalf Baechle 
/*
 * Number of FPU emulations after which a task is bound to FPU-capable
 * CPUs; the actual value is computed in mips_mt_set_cpuoptions() unless
 * overridden by the "fpaff=" boot option.
 */
unsigned long mt_fpemul_threshold = 0;
3641c594abSRalf Baechle 
3741c594abSRalf Baechle /*
3841c594abSRalf Baechle  * Replacement functions for the sys_sched_setaffinity() and
3941c594abSRalf Baechle  * sys_sched_getaffinity() system calls, so that we can integrate
4041c594abSRalf Baechle  * FPU affinity with the user's requested processor affinity.
4141c594abSRalf Baechle  * This code is 98% identical with the sys_sched_setaffinity()
4241c594abSRalf Baechle  * and sys_sched_getaffinity() system calls, and should be
4341c594abSRalf Baechle  * updated when kernel/sched.c changes.
4441c594abSRalf Baechle  */
4541c594abSRalf Baechle 
4641c594abSRalf Baechle /*
4741c594abSRalf Baechle  * find_process_by_pid - find a process with a matching PID value.
4841c594abSRalf Baechle  * used in sys_sched_set/getaffinity() in kernel/sched.c, so
4941c594abSRalf Baechle  * cloned here.
5041c594abSRalf Baechle  */
5136c8b586SIngo Molnar static inline struct task_struct *find_process_by_pid(pid_t pid)
5241c594abSRalf Baechle {
5341c594abSRalf Baechle 	return pid ? find_task_by_pid(pid) : current;
5441c594abSRalf Baechle }
5541c594abSRalf Baechle 
5641c594abSRalf Baechle 
5741c594abSRalf Baechle /*
5841c594abSRalf Baechle  * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
5941c594abSRalf Baechle  */
6041c594abSRalf Baechle asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
6141c594abSRalf Baechle 				      unsigned long __user *user_mask_ptr)
6241c594abSRalf Baechle {
6341c594abSRalf Baechle 	cpumask_t new_mask;
6441c594abSRalf Baechle 	cpumask_t effective_mask;
6541c594abSRalf Baechle 	int retval;
6636c8b586SIngo Molnar 	struct task_struct *p;
6741c594abSRalf Baechle 
6841c594abSRalf Baechle 	if (len < sizeof(new_mask))
6941c594abSRalf Baechle 		return -EINVAL;
7041c594abSRalf Baechle 
7141c594abSRalf Baechle 	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
7241c594abSRalf Baechle 		return -EFAULT;
7341c594abSRalf Baechle 
7441c594abSRalf Baechle 	lock_cpu_hotplug();
7541c594abSRalf Baechle 	read_lock(&tasklist_lock);
7641c594abSRalf Baechle 
7741c594abSRalf Baechle 	p = find_process_by_pid(pid);
7841c594abSRalf Baechle 	if (!p) {
7941c594abSRalf Baechle 		read_unlock(&tasklist_lock);
8041c594abSRalf Baechle 		unlock_cpu_hotplug();
8141c594abSRalf Baechle 		return -ESRCH;
8241c594abSRalf Baechle 	}
8341c594abSRalf Baechle 
8441c594abSRalf Baechle 	/*
8541c594abSRalf Baechle 	 * It is not safe to call set_cpus_allowed with the
8641c594abSRalf Baechle 	 * tasklist_lock held.  We will bump the task_struct's
8741c594abSRalf Baechle 	 * usage count and drop tasklist_lock before invoking
8841c594abSRalf Baechle 	 * set_cpus_allowed.
8941c594abSRalf Baechle 	 */
9041c594abSRalf Baechle 	get_task_struct(p);
9141c594abSRalf Baechle 
9241c594abSRalf Baechle 	retval = -EPERM;
9341c594abSRalf Baechle 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
9441c594abSRalf Baechle 			!capable(CAP_SYS_NICE)) {
9541c594abSRalf Baechle 		read_unlock(&tasklist_lock);
9641c594abSRalf Baechle 		goto out_unlock;
9741c594abSRalf Baechle 	}
9841c594abSRalf Baechle 
9941c594abSRalf Baechle 	/* Record new user-specified CPU set for future reference */
10041c594abSRalf Baechle 	p->thread.user_cpus_allowed = new_mask;
10141c594abSRalf Baechle 
10241c594abSRalf Baechle 	/* Unlock the task list */
10341c594abSRalf Baechle 	read_unlock(&tasklist_lock);
10441c594abSRalf Baechle 
10541c594abSRalf Baechle 	/* Compute new global allowed CPU set if necessary */
10641c594abSRalf Baechle 	if( (p->thread.mflags & MF_FPUBOUND)
10741c594abSRalf Baechle 	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
10841c594abSRalf Baechle 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
10941c594abSRalf Baechle 		retval = set_cpus_allowed(p, effective_mask);
11041c594abSRalf Baechle 	} else {
11141c594abSRalf Baechle 		p->thread.mflags &= ~MF_FPUBOUND;
11241c594abSRalf Baechle 		retval = set_cpus_allowed(p, new_mask);
11341c594abSRalf Baechle 	}
11441c594abSRalf Baechle 
11541c594abSRalf Baechle 
11641c594abSRalf Baechle out_unlock:
11741c594abSRalf Baechle 	put_task_struct(p);
11841c594abSRalf Baechle 	unlock_cpu_hotplug();
11941c594abSRalf Baechle 	return retval;
12041c594abSRalf Baechle }
12141c594abSRalf Baechle 
12241c594abSRalf Baechle /*
12341c594abSRalf Baechle  * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
12441c594abSRalf Baechle  */
12541c594abSRalf Baechle asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
12641c594abSRalf Baechle 				      unsigned long __user *user_mask_ptr)
12741c594abSRalf Baechle {
12841c594abSRalf Baechle 	unsigned int real_len;
12941c594abSRalf Baechle 	cpumask_t mask;
13041c594abSRalf Baechle 	int retval;
13136c8b586SIngo Molnar 	struct task_struct *p;
13241c594abSRalf Baechle 
13341c594abSRalf Baechle 	real_len = sizeof(mask);
13441c594abSRalf Baechle 	if (len < real_len)
13541c594abSRalf Baechle 		return -EINVAL;
13641c594abSRalf Baechle 
13741c594abSRalf Baechle 	lock_cpu_hotplug();
13841c594abSRalf Baechle 	read_lock(&tasklist_lock);
13941c594abSRalf Baechle 
14041c594abSRalf Baechle 	retval = -ESRCH;
14141c594abSRalf Baechle 	p = find_process_by_pid(pid);
14241c594abSRalf Baechle 	if (!p)
14341c594abSRalf Baechle 		goto out_unlock;
14441c594abSRalf Baechle 
14541c594abSRalf Baechle 	retval = 0;
14641c594abSRalf Baechle 
14741c594abSRalf Baechle 	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
14841c594abSRalf Baechle 
14941c594abSRalf Baechle out_unlock:
15041c594abSRalf Baechle 	read_unlock(&tasklist_lock);
15141c594abSRalf Baechle 	unlock_cpu_hotplug();
15241c594abSRalf Baechle 	if (retval)
15341c594abSRalf Baechle 		return retval;
15441c594abSRalf Baechle 	if (copy_to_user(user_mask_ptr, &mask, real_len))
15541c594abSRalf Baechle 		return -EFAULT;
15641c594abSRalf Baechle 	return real_len;
15741c594abSRalf Baechle }
15841c594abSRalf Baechle 
15941c594abSRalf Baechle #endif /* CONFIG_MIPS_MT_FPAFF */
16041c594abSRalf Baechle 
16141c594abSRalf Baechle /*
16241c594abSRalf Baechle  * Dump new MIPS MT state for the core. Does not leave TCs halted.
16341c594abSRalf Baechle  * Takes an argument which taken to be a pre-call MVPControl value.
16441c594abSRalf Baechle  */
16541c594abSRalf Baechle 
/*
 * Dump the MT state of the core: global MVP registers, then per-VPE,
 * then per-TC registers.  Does not leave TCs halted on exit.
 *
 * @mvpctl: pre-call MVPControl value, printed alongside the value
 *          dvpe() reads so the two can be compared.
 */
void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;		/* saved IRQ state */
	unsigned long vpflags;		/* MVPControl as returned by dvpe() */
	unsigned long mvpconf0;
	int nvpe;			/* number of VPEs in the core */
	int ntc;			/* number of TCs in the core */
	int i;
	int tc;
	unsigned long haltval;		/* TCHalt value on entry (0 = running) */
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	/* Disable interrupts and other VPEs so the dump is consistent */
	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk("   MVPControl Passed: %08lx\n", mvpctl);
	printk("   MVPControl Read: %08lx\n", vpflags);
	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	/* MVPConf0.PVPE/PTC hold "count minus one", hence the + 1 */
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for(i = 0; i < nvpe; i++) {
	    for(tc = 0; tc < ntc; tc++) {
			settc(tc);
		/* Dump VPE i through the first TC found bound to it */
		if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
		    printk("  VPE %d\n", i);
		    printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
		    printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
		    printk("   VPE%d.Status : %08lx\n",
				i, read_vpe_c0_status());
		    printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
		    printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
		    printk("   VPE%d.Config7 : %08lx\n",
				i, read_vpe_c0_config7());
		    break; /* Next VPE */
		}
	    }
	}
	printk("-- per-TC State --\n");
	for(tc = 0; tc < ntc; tc++) {
		settc(tc);
		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself?  */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk("  TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			/* Halt the TC so its state is stable while we read it */
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk("  TC %d\n", tc);
		}
		printk("   TCStatus : %08lx\n", tcstatval);
		printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
		printk("   TCRestart : %08lx\n", read_tc_c0_tcrestart());
		printk("   TCHalt : %08lx\n", haltval);
		printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
		/* Un-halt the TC only if it was running when we got here */
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}
23741c594abSRalf Baechle 
/*
 * Boot-option state consumed by mips_mt_set_cpuoptions() to tweak
 * CP0.Config7 bits on 34K-family cores.  -1 means "not specified".
 */
static int mt_opt_norps = 0;		/* "norps" given (deprecated) */
static int mt_opt_rpsctl = -1;		/* "rpsctl=" value */
static int mt_opt_nblsu = -1;		/* "nblsu=" value */
static int mt_opt_forceconfig7 = 0;	/* "config7=" was given */
static int mt_opt_config7 = -1;		/* forced Config7 value */
24341c594abSRalf Baechle 
/*
 * "norps": deprecated request to disable the return prediction stack;
 * only triggers a warning in mips_mt_set_cpuoptions() ("rpsctl=" is
 * the replacement).
 */
static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;	/* option consumed */
}
__setup("norps", rps_disable);
25041c594abSRalf Baechle 
/* "rpsctl=": 34K return prediction stack override, applied at boot */
static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;	/* option consumed */
}
__setup("rpsctl=", rpsctl_set);
25741c594abSRalf Baechle 
/* "nblsu=": 34K ALU/LSU sync override, applied at boot */
static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;	/* option consumed */
}
__setup("nblsu=", nblsu_set);
26441c594abSRalf Baechle 
/* "config7=": force the entire CP0.Config7 register to the given value */
static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;	/* option consumed */
}
__setup("config7=", config7_set);
27241c594abSRalf Baechle 
/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush = 0;		/* serialize I-cache flushes across threads */
int mt_protdflush = 0;		/* serialize D-cache flushes across threads */
int mt_n_iflushes = 1;		/* number of times to repeat an I-cache flush */
int mt_n_dflushes = 1;		/* number of times to repeat a D-cache flush */
27841c594abSRalf Baechle 
/* "protiflush": make I-cache flushes single-threaded */
static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;	/* option consumed */
}
__setup("protiflush", set_protiflush);
28541c594abSRalf Baechle 
/* "protdflush": make D-cache flushes single-threaded */
static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;	/* option consumed */
}
__setup("protdflush", set_protdflush);
29241c594abSRalf Baechle 
/* "niflush=": repeat each I-cache flush the given number of times */
static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;	/* option consumed */
}
__setup("niflush=", niflush);
29941c594abSRalf Baechle 
/* "ndflush=": repeat each D-cache flush the given number of times */
static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;	/* option consumed */
}
__setup("ndflush=", ndflush);
30641c594abSRalf Baechle #ifdef CONFIG_MIPS_MT_FPAFF
/* "fpaff=" emulation threshold; -1 means "derive it at boot" */
static int fpaff_threshold = -1;

/* Parse the "fpaff=" boot option into fpaff_threshold. */
static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;	/* option consumed */
}

__setup("fpaff=", fpaff_thresh);
31641c594abSRalf Baechle #endif /* CONFIG_MIPS_MT_FPAFF */
31741c594abSRalf Baechle 
/* "itcbase=" physical base for the ITC mapping; 0 means "don't map ITC" */
static unsigned int itc_base = 0;

/* Parse the "itcbase=" boot option into itc_base. */
static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;	/* option consumed */
}

__setup("itcbase=", set_itc_base);
32741c594abSRalf Baechle 
32841c594abSRalf Baechle void mips_mt_set_cpuoptions(void)
32941c594abSRalf Baechle {
33041c594abSRalf Baechle 	unsigned int oconfig7 = read_c0_config7();
33141c594abSRalf Baechle 	unsigned int nconfig7 = oconfig7;
33241c594abSRalf Baechle 
33341c594abSRalf Baechle 	if (mt_opt_norps) {
33441c594abSRalf Baechle 		printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
33541c594abSRalf Baechle 	}
33641c594abSRalf Baechle 	if (mt_opt_rpsctl >= 0) {
33741c594abSRalf Baechle 		printk("34K return prediction stack override set to %d.\n",
33841c594abSRalf Baechle 			mt_opt_rpsctl);
33941c594abSRalf Baechle 		if (mt_opt_rpsctl)
34041c594abSRalf Baechle 			nconfig7 |= (1 << 2);
34141c594abSRalf Baechle 		else
34241c594abSRalf Baechle 			nconfig7 &= ~(1 << 2);
34341c594abSRalf Baechle 	}
34441c594abSRalf Baechle 	if (mt_opt_nblsu >= 0) {
34541c594abSRalf Baechle 		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
34641c594abSRalf Baechle 		if (mt_opt_nblsu)
34741c594abSRalf Baechle 			nconfig7 |= (1 << 5);
34841c594abSRalf Baechle 		else
34941c594abSRalf Baechle 			nconfig7 &= ~(1 << 5);
35041c594abSRalf Baechle 	}
35141c594abSRalf Baechle 	if (mt_opt_forceconfig7) {
35241c594abSRalf Baechle 		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
35341c594abSRalf Baechle 		nconfig7 = mt_opt_config7;
35441c594abSRalf Baechle 	}
35541c594abSRalf Baechle 	if (oconfig7 != nconfig7) {
35641c594abSRalf Baechle 		__asm__ __volatile("sync");
35741c594abSRalf Baechle 		write_c0_config7(nconfig7);
35841c594abSRalf Baechle 		ehb ();
35941c594abSRalf Baechle 		printk("Config7: 0x%08x\n", read_c0_config7());
36041c594abSRalf Baechle 	}
36141c594abSRalf Baechle 
36241c594abSRalf Baechle 	/* Report Cache management debug options */
36341c594abSRalf Baechle 	if (mt_protiflush)
36441c594abSRalf Baechle 		printk("I-cache flushes single-threaded\n");
36541c594abSRalf Baechle 	if (mt_protdflush)
36641c594abSRalf Baechle 		printk("D-cache flushes single-threaded\n");
36741c594abSRalf Baechle 	if (mt_n_iflushes != 1)
36841c594abSRalf Baechle 		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
36941c594abSRalf Baechle 	if (mt_n_dflushes != 1)
37041c594abSRalf Baechle 		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
37141c594abSRalf Baechle 
37241c594abSRalf Baechle #ifdef CONFIG_MIPS_MT_FPAFF
37341c594abSRalf Baechle 	/* FPU Use Factor empirically derived from experiments on 34K */
37441c594abSRalf Baechle #define FPUSEFACTOR 333
37541c594abSRalf Baechle 
37641c594abSRalf Baechle 	if (fpaff_threshold >= 0) {
37741c594abSRalf Baechle 		mt_fpemul_threshold = fpaff_threshold;
37841c594abSRalf Baechle 	} else {
37941c594abSRalf Baechle 		mt_fpemul_threshold =
38041c594abSRalf Baechle 			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
38141c594abSRalf Baechle 	}
38241c594abSRalf Baechle 	printk("FPU Affinity set after %ld emulations\n",
38341c594abSRalf Baechle 			mt_fpemul_threshold);
38441c594abSRalf Baechle #endif /* CONFIG_MIPS_MT_FPAFF */
38541c594abSRalf Baechle 
38641c594abSRalf Baechle 	if (itc_base != 0) {
38741c594abSRalf Baechle 		/*
38841c594abSRalf Baechle 		 * Configure ITC mapping.  This code is very
38941c594abSRalf Baechle 		 * specific to the 34K core family, which uses
39041c594abSRalf Baechle 		 * a special mode bit ("ITC") in the ErrCtl
39141c594abSRalf Baechle 		 * register to enable access to ITC control
39241c594abSRalf Baechle 		 * registers via cache "tag" operations.
39341c594abSRalf Baechle 		 */
39441c594abSRalf Baechle 		unsigned long ectlval;
39541c594abSRalf Baechle 		unsigned long itcblkgrn;
39641c594abSRalf Baechle 
39741c594abSRalf Baechle 		/* ErrCtl register is known as "ecc" to Linux */
39841c594abSRalf Baechle 		ectlval = read_c0_ecc();
39941c594abSRalf Baechle 		write_c0_ecc(ectlval | (0x1 << 26));
40041c594abSRalf Baechle 		ehb();
40141c594abSRalf Baechle #define INDEX_0 (0x80000000)
40241c594abSRalf Baechle #define INDEX_8 (0x80000008)
40341c594abSRalf Baechle 		/* Read "cache tag" for Dcache pseudo-index 8 */
40441c594abSRalf Baechle 		cache_op(Index_Load_Tag_D, INDEX_8);
40541c594abSRalf Baechle 		ehb();
40641c594abSRalf Baechle 		itcblkgrn = read_c0_dtaglo();
40741c594abSRalf Baechle 		itcblkgrn &= 0xfffe0000;
40841c594abSRalf Baechle 		/* Set for 128 byte pitch of ITC cells */
40941c594abSRalf Baechle 		itcblkgrn |= 0x00000c00;
41041c594abSRalf Baechle 		/* Stage in Tag register */
41141c594abSRalf Baechle 		write_c0_dtaglo(itcblkgrn);
41241c594abSRalf Baechle 		ehb();
41341c594abSRalf Baechle 		/* Write out to ITU with CACHE op */
41441c594abSRalf Baechle 		cache_op(Index_Store_Tag_D, INDEX_8);
41541c594abSRalf Baechle 		/* Now set base address, and turn ITC on with 0x1 bit */
41641c594abSRalf Baechle 		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
41741c594abSRalf Baechle 		ehb();
41841c594abSRalf Baechle 		/* Write out to ITU with CACHE op */
41941c594abSRalf Baechle 		cache_op(Index_Store_Tag_D, INDEX_0);
42041c594abSRalf Baechle 		write_c0_ecc(ectlval);
42141c594abSRalf Baechle 		ehb();
42241c594abSRalf Baechle 		printk("Mapped %ld ITC cells starting at 0x%08x\n",
42341c594abSRalf Baechle 			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
42441c594abSRalf Baechle 	}
42541c594abSRalf Baechle }
42641c594abSRalf Baechle 
42741c594abSRalf Baechle /*
42841c594abSRalf Baechle  * Function to protect cache flushes from concurrent execution
42941c594abSRalf Baechle  * depends on MP software model chosen.
43041c594abSRalf Baechle  */
43141c594abSRalf Baechle 
/*
 * Serialize a cache flush against concurrent threads of execution.
 * Only the SMTC model has an implementation so far.
 */
void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
44141c594abSRalf Baechle 
/*
 * Release the cache-flush serialization taken by mt_cflush_lockdown().
 * Only the SMTC model has an implementation so far.
 */
void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
451