[IA64] Minimize per_cpu reservations.
author holt@sgi.com <holt@sgi.com>
Thu, 3 Apr 2008 20:17:13 +0000 (15:17 -0500)
committer Tony Luck <tony.luck@intel.com>
Tue, 8 Apr 2008 20:51:35 +0000 (13:51 -0700)
This patch significantly shrinks boot memory allocation on ia64.
It does this by not allocating per_cpu areas for cpus that can never
exist.

In the case where acpi does not provide any numa node description for
the cpus, I defaulted to assigning the first 32 cpus round-robin across
the known nodes. For the !CONFIG_ACPI case I used for_each_possible_cpu().
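For reference, a minimal userspace sketch of that round-robin default,
mirroring the per_cpu_scan_finalize() logic added below in
include/asm-ia64/acpi.h (NR_CPUS, the node count and the cpu minimum here
are illustrative stand-ins, not the kernel values):

/*
 * Sketch only: spread the first min_cpus cpus round-robin across the
 * known nodes when no SRAT cpu affinity entries were found.
 */
#include <stdio.h>

#define NR_CPUS      8
#define NUMA_NO_NODE (-1)

int main(void)
{
	int nid[NR_CPUS];
	int online_nodes = 2;   /* assumed number of known nodes */
	int min_cpus = 4;       /* the patch uses 32 when SRAT lists no cpus */
	int next_nid = 0;
	int cpu;

	/* No SRAT information: every cpu starts with no node assigned. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		nid[cpu] = NUMA_NO_NODE;

	/* Assign the first min_cpus cpus round-robin across the nodes. */
	for (cpu = 0; cpu < min_cpus && cpu < NR_CPUS; cpu++) {
		if (nid[cpu] == NUMA_NO_NODE) {
			nid[cpu] = next_nid;
			if (++next_nid >= online_nodes)
				next_nid = 0;
		}
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> node %d\n", cpu, nid[cpu]);

	return 0;
}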

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
arch/ia64/kernel/acpi.c
arch/ia64/kernel/numa.c
arch/ia64/kernel/setup.c
arch/ia64/mm/discontig.c
arch/ia64/mm/numa.c
include/asm-ia64/acpi.h
include/asm-ia64/numa.h

index 78f28d825f30fc7accf50064b97466f4d5bfeaf0..c7467f863c7af5c99312767f8f176628568ab96f 100644 (file)
@@ -423,6 +423,7 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)       (set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)      (test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
 static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
@@ -482,6 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
            (pa->apic_id << 8) | (pa->local_sapic_eid);
        /* nid should be overridden as logical node id later */
        node_cpuid[srat_num_cpus].nid = pxm;
+       cpu_set(srat_num_cpus, early_cpu_possible_map);
        srat_num_cpus++;
 }
 
@@ -559,7 +561,7 @@ void __init acpi_numa_arch_fixup(void)
        }
 
        /* set logical node id in cpu structure */
-       for (i = 0; i < srat_num_cpus; i++)
+       for_each_possible_early_cpu(i)
                node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
 
        printk(KERN_INFO "Number of logical nodes in system = %d\n",
index a78b45f5fe2f9d3aef7c9c8aa8012987b0558a0e..c93420c97409dc2c97c41417279f7395e411b6f6 100644 (file)
@@ -73,7 +73,7 @@ void __init build_cpu_to_node_map(void)
        for(node=0; node < MAX_NUMNODES; node++)
                cpus_clear(node_to_cpu_mask[node]);
 
-       for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+       for_each_possible_early_cpu(cpu) {
                node = -1;
                for (i = 0; i < NR_CPUS; ++i)
                        if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
index 4aa9eaea76c373bcab89dfa1d058984272030456..6206541f9e87f5d9287bf9d7eb03ef8b73bf165a 100644 (file)
@@ -493,6 +493,8 @@ setup_arch (char **cmdline_p)
        acpi_table_init();
 # ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
+       per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
+               32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
 # endif
 #else
 # ifdef CONFIG_SMP
index 06c540a29467064a4103806b5b33d07531c6acce..6136a4c6df11dddf90560eaf13f93537216685f3 100644 (file)
@@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
 
@@ -143,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                if (node == node_cpuid[cpu].nid) {
                        memcpy(__va(cpu_data), __phys_per_cpu_start,
                               __per_cpu_end - __per_cpu_start);
@@ -346,7 +346,7 @@ static void __init initialize_pernode_data(void)
 
 #ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
@@ -494,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
        int cpu;
        static int first_time = 1;
 
-
-       if (smp_processor_id() != 0)
-               return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
        if (first_time) {
                first_time = 0;
-               for (cpu = 0; cpu < NR_CPUS; cpu++)
+               for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }
 
index 7807fc5c04224fcad33d98a83d96a48a1df25423..b73bf1838e57750fb69f3ae27d6950bcc9dc46c8 100644 (file)
@@ -27,7 +27,9 @@
  */
 int num_node_memblks;
 struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-struct node_cpuid_s node_cpuid[NR_CPUS];
+struct node_cpuid_s node_cpuid[NR_CPUS] =
+       { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };
+
 /*
  * This is a matrix with "distances" between nodes, they should be
  * proportional to the memory access latency ratios.
index cd1cc39b5599b639441bbb00b050d40675889a2e..fcfad326f4c721cdf06301beee8487be2f4c06e9 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/numa.h>
 #include <asm/system.h>
+#include <asm/numa.h>
 
 #define COMPILER_DEPENDENT_INT64       long
 #define COMPILER_DEPENDENT_UINT64      unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
 extern void set_cpei_target_cpu(unsigned int cpu);
 extern unsigned int get_cpei_target_cpu(void);
 extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
 extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
 
 #ifdef CONFIG_ACPI_NUMA
 #if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 
 #define acpi_unlazy_tlb(x)
 
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu)  \
+       for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+       int low_cpu, high_cpu;
+       int cpu;
+       int next_nid = 0;
+
+       low_cpu = cpus_weight(early_cpu_possible_map);
+
+       high_cpu = max(low_cpu, min_cpus);
+       high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+       for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+               cpu_set(cpu, early_cpu_possible_map);
+               if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+                       node_cpuid[cpu].nid = next_nid;
+                       next_nid++;
+                       if (next_nid >= num_online_nodes())
+                               next_nid = 0;
+               }
+       }
+}
+#endif /* CONFIG_ACPI_NUMA */
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
index 6a8a27cfae3e04f0f35771aa9490ff3b46e8729d..3499ff57bf425db6544e9bf60f31a334d4aaa4a3 100644 (file)
@@ -22,6 +22,8 @@
 
 #include <asm/mmzone.h>
 
+#define NUMA_NO_NODE   -1
+
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];