--- topology.c (1087ad4e3f88c474b8134a482720782922bf3fdf)
+++ topology.c (0fb3978b0aac3a5c08637aed03cc2d65f793508f)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Scheduler topology setup/handling methods
  */
 #include "sched.h"

 DEFINE_MUTEX(sched_domains_mutex);

--- 1478 unchanged lines hidden ---

 enum numa_topology_type sched_numa_topology_type;

 static int sched_domains_numa_levels;
 static int sched_domains_curr_level;

 int sched_max_numa_distance;
 static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
-
-static unsigned long __read_mostly *sched_numa_onlined_nodes;
 #endif

 /*
  * SD_flags allowed in topology descriptions.
  *
  * These flags are purely descriptive of the topology and do not prescribe
  * behaviour. Behaviour is artificial and mapped in the below sd_init()
  * function:

--- 141 unchanged lines hidden ---

 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 #endif
 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 	{ NULL, },
 };

 static struct sched_domain_topology_level *sched_domain_topology =
 	default_topology;
+static struct sched_domain_topology_level *sched_domain_topology_saved;

 #define for_each_sd_topology(tl)			\
 	for (tl = sched_domain_topology; tl->mask; tl++)

 void set_sched_topology(struct sched_domain_topology_level *tl)
 {
 	if (WARN_ON_ONCE(sched_smp_initialized))
 		return;

 	sched_domain_topology = tl;
+	sched_domain_topology_saved = NULL;
 }

 #ifdef CONFIG_NUMA

 static const struct cpumask *sd_numa_mask(int cpu)
 {
 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
 }

--- 7 unchanged lines hidden ---

 		return;

 	done = true;

 	printk(KERN_WARNING "ERROR: %s\n\n", str);

 	for (i = 0; i < nr_node_ids; i++) {
 		printk(KERN_WARNING " ");
-		for (j = 0; j < nr_node_ids; j++)
-			printk(KERN_CONT "%02d ", node_distance(i,j));
+		for (j = 0; j < nr_node_ids; j++) {
+			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
+				printk(KERN_CONT "(%02d) ", node_distance(i,j));
+			else
+				printk(KERN_CONT " %02d  ", node_distance(i,j));
+		}
 		printk(KERN_CONT "\n");
 	}
 	printk(KERN_WARNING "\n");
 }

 bool find_numa_distance(int distance)
 {
-	int i;
+	bool found = false;
+	int i, *distances;

 	if (distance == node_distance(0, 0))
 		return true;

+	rcu_read_lock();
+	distances = rcu_dereference(sched_domains_numa_distance);
+	if (!distances)
+		goto unlock;
 	for (i = 0; i < sched_domains_numa_levels; i++) {
-		if (sched_domains_numa_distance[i] == distance)
-			return true;
+		if (distances[i] == distance) {
+			found = true;
+			break;
+		}
 	}
+unlock:
+	rcu_read_unlock();

-	return false;
+	return found;
 }

+#define for_each_cpu_node_but(n, nbut)		\
+	for_each_node_state(n, N_CPU)		\
+		if (n == nbut)			\
+			continue;		\
+		else
+
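Illustrative usage of the new iterator (a sketch, not part of the patch; the helper and its message are hypothetical): it visits every node in N_CPU state except @nbut, e.g. a node whose last CPU is going away.

	static void example_walk_cpu_nodes(int nbut)
	{
		int n;

		for_each_cpu_node_but(n, nbut)
			pr_info("CPU node %d\n", n);
	}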
 /*
  * A system can have three types of NUMA topology:
  *  NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
  *  NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
  *  NUMA_BACKPLANE: nodes can reach other nodes through a backplane
  *
  * The difference between a glueless mesh topology and a backplane
  * topology lies in whether communication between not directly
  * connected nodes goes through intermediary nodes (where programs
  * could run), or through backplane controllers. This affects
  * placement of programs.
  *
  * The type of topology can be discerned with the following tests:
  * - If the maximum distance between any nodes is 1 hop, the system
  *   is directly connected.
  * - If for two nodes A and B, located N > 1 hops away from each other,
  *   there is an intermediary node C, which is < N hops away from both
  *   nodes A and B, the system is a glueless mesh.
  */
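An illustration with assumed distance values (not taken from the patch):

	/*
	 * Hypothetical SLIT tables:
	 *
	 *   NUMA_DIRECT, 2 nodes:    NUMA_GLUELESS_MESH, 3 nodes in a line
	 *      10 20                 (node 1 relays between nodes 0 and 2):
	 *      20 10                    10 20 30
	 *                               20 10 20
	 *                               30 20 10
	 *
	 * In the mesh table, sched_max_numa_distance == 30 and node 1
	 * satisfies node_distance(0, 1) < 30 && node_distance(2, 1) < 30,
	 * so the scan in init_numa_topology_type() below picks
	 * NUMA_GLUELESS_MESH; without such an intermediary node it would
	 * fall back to NUMA_BACKPLANE.
	 */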
-static void init_numa_topology_type(void)
+static void init_numa_topology_type(int offline_node)
 {
 	int a, b, c, n;

 	n = sched_max_numa_distance;

 	if (sched_domains_numa_levels <= 2) {
 		sched_numa_topology_type = NUMA_DIRECT;
 		return;
 	}

-	for_each_online_node(a) {
-		for_each_online_node(b) {
+	for_each_cpu_node_but(a, offline_node) {
+		for_each_cpu_node_but(b, offline_node) {
 			/* Find two nodes furthest removed from each other. */
 			if (node_distance(a, b) < n)
 				continue;

 			/* Is there an intermediary node between a and b? */
-			for_each_online_node(c) {
+			for_each_cpu_node_but(c, offline_node) {
 				if (node_distance(a, c) < n &&
 				    node_distance(b, c) < n) {
 					sched_numa_topology_type =
 							NUMA_GLUELESS_MESH;
 					return;
 				}
 			}

 			sched_numa_topology_type = NUMA_BACKPLANE;
 			return;
 		}
 	}
+
+	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
+	sched_numa_topology_type = NUMA_DIRECT;
 }


 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)

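A worked example of the deduplication below, with assumed SLIT values: a three-node system whose distance table contains only the values 10, 20 and 30 sets exactly three bits in distance_map, so nr_levels == 3 and distances[] becomes {10, 20, 30}. Assuming the usual DISTANCE_BITS of 8 (its definition is outside this hunk), NR_DISTANCE_VALUES is 256, matching the 8-bit distance entries an ACPI SLIT can encode.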
-void sched_init_numa(void)
+void sched_init_numa(int offline_node)
 {
 	struct sched_domain_topology_level *tl;
 	unsigned long *distance_map;
 	int nr_levels = 0;
 	int i, j;
+	int *distances;
+	struct cpumask ***masks;

 	/*
 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
 	 * unique distances in the node_distance() table.
 	 */
 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
 	if (!distance_map)
 		return;

 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
-	for (i = 0; i < nr_node_ids; i++) {
-		for (j = 0; j < nr_node_ids; j++) {
+	for_each_cpu_node_but(i, offline_node) {
+		for_each_cpu_node_but(j, offline_node) {
 			int distance = node_distance(i, j);

 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
 				sched_numa_warn("Invalid distance value range");
+				bitmap_free(distance_map);
 				return;
 			}

 			bitmap_set(distance_map, distance, 1);
 		}
 	}
 	/*
 	 * We can now figure out how many unique distance values there are and
 	 * allocate memory accordingly.
 	 */
 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);

-	sched_domains_numa_distance = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
-	if (!sched_domains_numa_distance) {
+	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
+	if (!distances) {
 		bitmap_free(distance_map);
 		return;
 	}

 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
-		sched_domains_numa_distance[i] = j;
+		distances[i] = j;
 	}
+	rcu_assign_pointer(sched_domains_numa_distance, distances);

 	bitmap_free(distance_map);

 	/*
 	 * 'nr_levels' contains the number of unique distances
 	 *
 	 * The sched_domains_numa_distance[] array includes the actual distance
 	 * numbers.

--- 5 unchanged lines hidden ---

 	 * the array will contain less than 'nr_levels' members. This could be
 	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
 	 * in other functions.
 	 *
 	 * We reset it to 'nr_levels' at the end of this function.
 	 */
 	sched_domains_numa_levels = 0;

-	sched_domains_numa_masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
-	if (!sched_domains_numa_masks)
+	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
+	if (!masks)
 		return;

 	/*
 	 * Now for each level, construct a mask per node which contains all
 	 * CPUs of nodes that are that many hops away from us.
 	 */
 	for (i = 0; i < nr_levels; i++) {
-		sched_domains_numa_masks[i] =
-			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
-		if (!sched_domains_numa_masks[i])
+		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
+		if (!masks[i])
 			return;

-		for (j = 0; j < nr_node_ids; j++) {
+		for_each_cpu_node_but(j, offline_node) {
 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
 			int k;

 			if (!mask)
 				return;

-			sched_domains_numa_masks[i][j] = mask;
+			masks[i][j] = mask;

-			for_each_node(k) {
-				/*
-				 * Distance information can be unreliable for
-				 * offline nodes, defer building the node
-				 * masks to its bringup.
-				 * This relies on all unique distance values
-				 * still being visible at init time.
-				 */
-				if (!node_online(j))
-					continue;
-
+			for_each_cpu_node_but(k, offline_node) {
 				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
 					sched_numa_warn("Node-distance not symmetric");

 				if (node_distance(j, k) > sched_domains_numa_distance[i])
 					continue;

 				cpumask_or(mask, mask, cpumask_of_node(k));
 			}
 		}
 	}
+	rcu_assign_pointer(sched_domains_numa_masks, masks);

 	/* Compute default topology size */
 	for (i = 0; sched_domain_topology[i].mask; i++);

 	tl = kzalloc((i + nr_levels + 1) *
 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
 	if (!tl)
 		return;

--- 21 unchanged lines hidden ---

 			.mask = sd_numa_mask,
 			.sd_flags = cpu_numa_flags,
 			.flags = SDTL_OVERLAP,
 			.numa_level = j,
 			SD_INIT_NAME(NUMA)
 		};
 	}

+	sched_domain_topology_saved = sched_domain_topology;
 	sched_domain_topology = tl;

 	sched_domains_numa_levels = nr_levels;
-	sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
+	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);

-	init_numa_topology_type();
-
-	sched_numa_onlined_nodes = bitmap_alloc(nr_node_ids, GFP_KERNEL);
-	if (!sched_numa_onlined_nodes)
-		return;
-
-	bitmap_zero(sched_numa_onlined_nodes, nr_node_ids);
-	for_each_online_node(i)
-		bitmap_set(sched_numa_onlined_nodes, i, 1);
+	init_numa_topology_type(offline_node);
 }

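The WRITE_ONCE() above pairs with READ_ONCE() in lockless readers of sched_max_numa_distance; a minimal reader sketch (the helper is hypothetical, not part of the patch):

	/* Check whether two nodes sit at the current maximum NUMA distance. */
	static bool example_at_max_distance(int a, int b)
	{
		return node_distance(a, b) == READ_ONCE(sched_max_numa_distance);
	}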
-static void __sched_domains_numa_masks_set(unsigned int node)
-{
-	int i, j;
-
-	/*
-	 * NUMA masks are not built for offline nodes in sched_init_numa().
-	 * Thus, when a CPU of a never-onlined-before node gets plugged in,
-	 * adding that new CPU to the right NUMA masks is not sufficient: the
-	 * masks of that CPU's node must also be updated.
-	 */
-	if (test_bit(node, sched_numa_onlined_nodes))
-		return;
-
-	bitmap_set(sched_numa_onlined_nodes, node, 1);
-
-	for (i = 0; i < sched_domains_numa_levels; i++) {
-		for (j = 0; j < nr_node_ids; j++) {
-			if (!node_online(j) || node == j)
-				continue;
-
-			if (node_distance(j, node) > sched_domains_numa_distance[i])
-				continue;
-
-			/* Add remote nodes in our masks */
-			cpumask_or(sched_domains_numa_masks[i][node],
-				   sched_domains_numa_masks[i][node],
-				   sched_domains_numa_masks[0][j]);
-		}
-	}
-
-	/*
-	 * A new node has been brought up, potentially changing the topology
-	 * classification.
-	 *
-	 * Note that this is racy vs any use of sched_numa_topology_type :/
-	 */
-	init_numa_topology_type();
-}
+
+static void sched_reset_numa(void)
+{
+	int nr_levels, *distances;
+	struct cpumask ***masks;
+
+	nr_levels = sched_domains_numa_levels;
+	sched_domains_numa_levels = 0;
+	sched_max_numa_distance = 0;
+	sched_numa_topology_type = NUMA_DIRECT;
+	distances = sched_domains_numa_distance;
+	rcu_assign_pointer(sched_domains_numa_distance, NULL);
+	masks = sched_domains_numa_masks;
+	rcu_assign_pointer(sched_domains_numa_masks, NULL);
+	if (distances || masks) {
+		int i, j;
+
+		synchronize_rcu();
+		kfree(distances);
+		for (i = 0; i < nr_levels && masks; i++) {
+			if (!masks[i])
+				continue;
+			for_each_node(j)
+				kfree(masks[i][j]);
+			kfree(masks[i]);
+		}
+		kfree(masks);
+	}
+	if (sched_domain_topology_saved) {
+		kfree(sched_domain_topology);
+		sched_domain_topology = sched_domain_topology_saved;
+		sched_domain_topology_saved = NULL;
+	}
+}
+
+/*
+ * Call with hotplug lock held
+ */
+void sched_update_numa(int cpu, bool online)
+{
+	int node;
+
+	node = cpu_to_node(cpu);
+	/*
+	 * Scheduler NUMA topology is updated when the first CPU of a
+	 * node is onlined or the last CPU of a node is offlined.
+	 */
+	if (cpumask_weight(cpumask_of_node(node)) != 1)
+		return;
+
+	sched_reset_numa();
+	sched_init_numa(online ? NUMA_NO_NODE : node);
+}
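A sketch of the intended caller in the CPU hotplug path (assumed shape; the core.c hooks are outside this hunk):

	int sched_cpu_activate(unsigned int cpu)
	{
		/* First CPU of a node coming online: rebuild the NUMA topology. */
		sched_update_numa(cpu, true);
		sched_domains_numa_masks_set(cpu);
		/* ... rest of activation ... */
		return 0;
	}

On the offline side, sched_update_numa(cpu, false) only takes effect when the last CPU of the node goes away, which is what the cpumask_weight() == 1 test above detects.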

 void sched_domains_numa_masks_set(unsigned int cpu)
 {
 	int node = cpu_to_node(cpu);
 	int i, j;

-	__sched_domains_numa_masks_set(node);
-
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++) {
-			if (!node_online(j))
+			if (!node_state(j, N_CPU))
 				continue;

 			/* Set ourselves in the remote node's masks */
 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
 		}
 	}
 }

 void sched_domains_numa_masks_clear(unsigned int cpu)
 {
 	int i, j;

 	for (i = 0; i < sched_domains_numa_levels; i++) {
-		for (j = 0; j < nr_node_ids; j++)
-			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
+		for (j = 0; j < nr_node_ids; j++) {
+			if (sched_domains_numa_masks[i][j])
+				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
+		}
 	}
 }

 /*
  * sched_numa_find_closest() - given the NUMA topology, find the cpu
  * closest to @cpu from @cpus.
  * cpus: cpumask to find a cpu from
  * cpu: cpu to be close to
  *
  * returns: cpu, or nr_cpu_ids when nothing found.
  */
 int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
 {
-	int i, j = cpu_to_node(cpu);
+	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
+	struct cpumask ***masks;

+	rcu_read_lock();
+	masks = rcu_dereference(sched_domains_numa_masks);
+	if (!masks)
+		goto unlock;
 	for (i = 0; i < sched_domains_numa_levels; i++) {
-		cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
-		if (cpu < nr_cpu_ids)
-			return cpu;
+		if (!masks[i][j])
+			break;
+		cpu = cpumask_any_and(cpus, masks[i][j]);
+		if (cpu < nr_cpu_ids) {
+			found = cpu;
+			break;
+		}
 	}
-	return nr_cpu_ids;
+unlock:
+	rcu_read_unlock();
+
+	return found;
 }
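Typical use (illustrative; the wrapper is hypothetical): pick a CPU for work that should run near a preferred CPU.

	/* Return the CPU in @allowed nearest to @hint, or @hint if none found. */
	static int example_pick_nearby_cpu(const struct cpumask *allowed, int hint)
	{
		int cpu = sched_numa_find_closest(allowed, hint);

		return cpu < nr_cpu_ids ? cpu : hint;
	}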

 #endif /* CONFIG_NUMA */

 static int __sdt_alloc(const struct cpumask *cpu_map)
 {
 	struct sched_domain_topology_level *tl;
 	int j;

--- 567 unchanged lines hidden ---