mtrr.c (adfe7512e1d0b2e83215b0ec56337d2df9f1032d) mtrr.c (f8bd9f25c9815161a39886fdd96d110b536a6074)
1/* Generic MTRR (Memory Type Range Register) driver.
2
3 Copyright (C) 1997-2000 Richard Gooch
4 Copyright (c) 2002 Patrick Mochel
5
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Library General Public
8 License as published by the Free Software Foundation; either

--- 55 unchanged lines hidden (view full) ---

64 return !!mtrr_if;
65}
66
67unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
68static DEFINE_MUTEX(mtrr_mutex);
69
70u64 size_or_mask, size_and_mask;
71
1/* Generic MTRR (Memory Type Range Register) driver.
2
3 Copyright (C) 1997-2000 Richard Gooch
4 Copyright (c) 2002 Patrick Mochel
5
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Library General Public
8 License as published by the Free Software Foundation; either

--- 55 unchanged lines hidden (view full) ---

64 return !!mtrr_if;
65}
66
67unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
68static DEFINE_MUTEX(mtrr_mutex);
69
70u64 size_or_mask, size_and_mask;
71
72static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
73
74const struct mtrr_ops *mtrr_if;
75
72const struct mtrr_ops *mtrr_if;
73
76void __init set_mtrr_ops(const struct mtrr_ops *ops)
77{
78 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
79 mtrr_ops[ops->vendor] = ops;
80}
81
82/* Returns non-zero if we have the write-combining memory type */
83static int have_wrcomb(void)
84{
85 struct pci_dev *dev;
86
87 dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
88 if (dev != NULL) {
89 /*

--- 487 unchanged lines hidden (view full) ---

577{
578 if (handle < MTRR_TO_PHYS_WC_OFFSET)
579 return -1;
580 else
581 return handle - MTRR_TO_PHYS_WC_OFFSET;
582}
583EXPORT_SYMBOL_GPL(arch_phys_wc_index);
584
74/* Returns non-zero if we have the write-combining memory type */
75static int have_wrcomb(void)
76{
77 struct pci_dev *dev;
78
79 dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
80 if (dev != NULL) {
81 /*

--- 487 unchanged lines hidden (view full) ---

569{
570 if (handle < MTRR_TO_PHYS_WC_OFFSET)
571 return -1;
572 else
573 return handle - MTRR_TO_PHYS_WC_OFFSET;
574}
575EXPORT_SYMBOL_GPL(arch_phys_wc_index);
576
/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
	/*
	 * The vendor-specific (non-generic) MTRR drivers are 32-bit only;
	 * on 64-bit kernels only the generic MTRR implementation exists,
	 * so there is nothing to register here.
	 */
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}
598
/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type ltype;	/* saved MTRR memory type for this range */
	unsigned long lbase;	/* saved range base (presumably in pages — confirm against users) */
	unsigned long lsize;	/* saved range size (same units as lbase) */
};

--- 41 unchanged lines hidden (view full) ---

648 * This needs to be called early; before any of the other CPUs are
649 * initialized (i.e. before smp_init()).
650 *
651 */
652void __init mtrr_bp_init(void)
653{
654 u32 phys_addr;
655
/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type ltype;	/* saved MTRR memory type for this range */
	unsigned long lbase;	/* saved range base (presumably in pages — confirm against users) */
	unsigned long lsize;	/* saved range size (same units as lbase) */
};

--- 41 unchanged lines hidden (view full) ---

626 * This needs to be called early; before any of the other CPUs are
627 * initialized (i.e. before smp_init()).
628 *
629 */
630void __init mtrr_bp_init(void)
631{
632 u32 phys_addr;
633
656 init_ifs();
657
658 phys_addr = 32;
659
660 if (boot_cpu_has(X86_FEATURE_MTRR)) {
661 mtrr_if = &generic_mtrr_ops;
662 size_or_mask = SIZE_OR_MASK_BITS(36);
663 size_and_mask = 0x00f00000;
664 phys_addr = 36;
665

--- 24 unchanged lines hidden (view full) ---

690 size_and_mask = 0;
691 phys_addr = 32;
692 }
693 } else {
694 switch (boot_cpu_data.x86_vendor) {
695 case X86_VENDOR_AMD:
696 if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
697 /* Pre-Athlon (K6) AMD CPU MTRRs */
634 phys_addr = 32;
635
636 if (boot_cpu_has(X86_FEATURE_MTRR)) {
637 mtrr_if = &generic_mtrr_ops;
638 size_or_mask = SIZE_OR_MASK_BITS(36);
639 size_and_mask = 0x00f00000;
640 phys_addr = 36;
641

--- 24 unchanged lines hidden (view full) ---

666 size_and_mask = 0;
667 phys_addr = 32;
668 }
669 } else {
670 switch (boot_cpu_data.x86_vendor) {
671 case X86_VENDOR_AMD:
672 if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
673 /* Pre-Athlon (K6) AMD CPU MTRRs */
698 mtrr_if = mtrr_ops[X86_VENDOR_AMD];
674 mtrr_if = &amd_mtrr_ops;
699 size_or_mask = SIZE_OR_MASK_BITS(32);
700 size_and_mask = 0;
701 }
702 break;
703 case X86_VENDOR_CENTAUR:
704 if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
675 size_or_mask = SIZE_OR_MASK_BITS(32);
676 size_and_mask = 0;
677 }
678 break;
679 case X86_VENDOR_CENTAUR:
680 if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
705 mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
681 mtrr_if = &centaur_mtrr_ops;
706 size_or_mask = SIZE_OR_MASK_BITS(32);
707 size_and_mask = 0;
708 }
709 break;
710 case X86_VENDOR_CYRIX:
711 if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
682 size_or_mask = SIZE_OR_MASK_BITS(32);
683 size_and_mask = 0;
684 }
685 break;
686 case X86_VENDOR_CYRIX:
687 if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
712 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
688 mtrr_if = &cyrix_mtrr_ops;
713 size_or_mask = SIZE_OR_MASK_BITS(32);
714 size_and_mask = 0;
715 }
716 break;
717 default:
718 break;
719 }
720 }

--- 58 unchanged lines hidden ---
689 size_or_mask = SIZE_OR_MASK_BITS(32);
690 size_and_mask = 0;
691 }
692 break;
693 default:
694 break;
695 }
696 }

--- 58 unchanged lines hidden ---