Allow dyn-arrays in the per_cpu area, with their storage allocated
dynamically at boot.

Usage:
| /* in .h */
| struct kernel_stat {
| 	struct cpu_usage_stat cpustat;
| 	unsigned int *irqs;
| };
|
| /* in .c */
| DEFINE_PER_CPU(struct kernel_stat, kstat);
|
| DEFINE_PER_CPU_DYN_ARRAY_ADDR(per_cpu__kstat_irqs, per_cpu__kstat.irqs,
| 	sizeof(unsigned int), nr_irqs, sizeof(unsigned long), NULL);
After setup_per_cpu_areas()/per_cpu_alloc_dyn_array() have run, the
dyn-array in each CPU's per_cpu area is ready to use.
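
The member then behaves like any other per-CPU field. A minimal sketch
of a reader (not part of this patch; kstat/irqs as in the example above):

| /* sum one IRQ's count across all possible CPUs */
| static unsigned int kstat_irqs_sum(unsigned int irq)
| {
| 	unsigned int sum = 0;
| 	int cpu;
|
| 	for_each_possible_cpu(cpu)
| 		sum += per_cpu(kstat, cpu).irqs[irq];
| 	return sum;
| }
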
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
*/
void __init setup_per_cpu_areas(void)
{
- ssize_t size = PERCPU_ENOUGH_ROOM;
+ ssize_t size, old_size;
char *ptr;
int cpu;
setup_cpu_pda_map();
/* Copy section for each CPU (we discard the original) */
- size = PERCPU_ENOUGH_ROOM;
+ old_size = PERCPU_ENOUGH_ROOM;
+ size = old_size + per_cpu_dyn_array_size();
printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
size);
per_cpu_offset(cpu) = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ per_cpu_alloc_dyn_array(cpu, ptr + old_size);
}
printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
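
Each CPU's block is thus the static template copy followed by its
dyn-array storage (old_size = PERCPU_ENOUGH_ROOM; picture only, not
part of the patch):

| ptr                          ptr + old_size                  ptr + size
| | copy of __per_cpu_start.. | per-CPU dyn-array slices, each aligned |

per_cpu_alloc_dyn_array() (see below) carves that tail into the
individual arrays.
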
VMLINUX_SYMBOL(__dyn_array_start) = .; \
*(.dyn_array.init) \
VMLINUX_SYMBOL(__dyn_array_end) = .; \
+ } \
+ . = ALIGN((align)); \
+ .per_cpu_dyn_array.init : AT(ADDR(.per_cpu_dyn_array.init) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__per_cpu_dyn_array_start) = .; \
+ *(.per_cpu_dyn_array.init) \
+ VMLINUX_SYMBOL(__per_cpu_dyn_array_end) = .; \
}
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
void (*init_work)(void *);
};
extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[];
+extern struct dyn_array *__per_cpu_dyn_array_start[], *__per_cpu_dyn_array_end[];
-#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
+#define DEFINE_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
static struct dyn_array __dyn_array_##nameX __initdata = \
- { .name = (void **)&nameX,\
+ { .name = (void **)&(addrX),\
.size = sizeX,\
- .nr = &nrX,\
+ .nr = &(nrX),\
.align = alignX,\
.init_work = init_workX,\
}; \
static struct dyn_array *__dyn_array_ptr_##nameX __used \
__attribute__((__section__(".dyn_array.init"))) = \
&__dyn_array_##nameX
+#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
+ DEFINE_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX)
+
+#define DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
+ static struct dyn_array __per_cpu_dyn_array_##nameX __initdata = \
+ { .name = (void **)&(addrX),\
+ .size = sizeX,\
+ .nr = &(nrX),\
+ .align = alignX,\
+ .init_work = init_workX,\
+ }; \
+ static struct dyn_array *__per_cpu_dyn_array_ptr_##nameX __used \
+ __attribute__((__section__(".per_cpu_dyn_array.init"))) = \
+ &__per_cpu_dyn_array_##nameX
+
+#define DEFINE_PER_CPU_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
+ DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX)
+
extern void pre_alloc_dyn_array(void);
+extern unsigned long per_cpu_dyn_array_size(void);
+extern void per_cpu_alloc_dyn_array(int cpu, char *ptr);
#endif /* __ASSEMBLY__ */
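
For the kstat example from the changelog, the macro expands to roughly
(sketch, modulo whitespace):

| static struct dyn_array __per_cpu_dyn_array_per_cpu__kstat_irqs __initdata = {
| 	.name      = (void **)&per_cpu__kstat.irqs,
| 	.size      = sizeof(unsigned int),
| 	.nr        = &nr_irqs,
| 	.align     = sizeof(unsigned long),
| 	.init_work = NULL,
| };
| static struct dyn_array *__per_cpu_dyn_array_ptr_per_cpu__kstat_irqs __used
| 	__attribute__((__section__(".per_cpu_dyn_array.init"))) =
| 	&__per_cpu_dyn_array_per_cpu__kstat_irqs;
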
static void __init setup_per_cpu_areas(void)
{
- unsigned long size, i;
+ unsigned long size, i, old_size;
char *ptr;
unsigned long nr_possible_cpus = num_possible_cpus();
/* Copy section for each CPU (we discard the original) */
- size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+ old_size = PERCPU_ENOUGH_ROOM;
+ size = ALIGN(old_size + per_cpu_dyn_array_size(), PAGE_SIZE);
ptr = alloc_bootmem_pages(size * nr_possible_cpus);
for_each_possible_cpu(i) {
__per_cpu_offset[i] = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ per_cpu_alloc_dyn_array(i, ptr + old_size);
ptr += size;
}
}
#endif
}
+unsigned long __init per_cpu_dyn_array_size(void)
+{
+ unsigned long total_size = 0;
+#ifdef CONFIG_HAVE_DYN_ARRAY
+ unsigned long size;
+ struct dyn_array **daa;
+
+ for (daa = __per_cpu_dyn_array_start; daa < __per_cpu_dyn_array_end; daa++) {
+ struct dyn_array *da = *daa;
+
+ size = da->size * (*da->nr);
+ print_fn_descriptor_symbol("per_cpu_dyna_array %s ", da->name);
+ printk(KERN_CONT "size:%#lx nr:%d align:%#lx\n",
+ da->size, *da->nr, da->align);
+ total_size += roundup(size, da->align);
+ }
+ if (total_size)
+ printk(KERN_DEBUG "per_cpu_dyna_array total_size: %#lx\n",
+ total_size);
+#endif
+ return total_size;
+}
+
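
For the kstat example this accounts for
roundup(nr_irqs * sizeof(unsigned int), sizeof(unsigned long)) bytes
per CPU; with, say, nr_irqs == 224 that is roundup(896, 8) == 896.
Each registered array is rounded up to its own alignment before the
next slice starts, so the sum matches what per_cpu_alloc_dyn_array()
consumes (assuming the tail itself starts suitably aligned).
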
+void __init per_cpu_alloc_dyn_array(int cpu, char *ptr)
+{
+#ifdef CONFIG_HAVE_DYN_ARRAY
+ unsigned long size, phys;
+ struct dyn_array **daa;
+ unsigned long addr;
+ void **array;
+
+ phys = virt_to_phys(ptr);
+
+ for (daa = __per_cpu_dyn_array_start; daa < __per_cpu_dyn_array_end; daa++) {
+ struct dyn_array *da = *daa;
+
+ size = da->size * (*da->nr);
+ print_fn_descriptor_symbol("per_cpu_dyna_array %s ", da->name);
+ printk(KERN_CONT "size:%#lx nr:%d align:%#lx",
+ da->size, *da->nr, da->align);
+
+ phys = roundup(phys, da->align);
+ addr = (unsigned long)da->name;
+ addr += per_cpu_offset(cpu);
+ array = (void **)addr;
+ *array = phys_to_virt(phys);
+ *da->name = *array; /* so that init_work() can use it directly */
+ printk(KERN_CONT " %p ==> [%#lx - %#lx]\n", array, phys, phys + size);
+ phys += size;
+
+ if (da->init_work)
+ da->init_work(da);
+ }
+#endif
+}
+
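
The fixup works because every CPU's copy shares the template's layout:
da->name is the address of the pointer member in the original image, so
adding per_cpu_offset(cpu) lands on the same member in CPU cpu's copy.
Net effect for the kstat example (conceptual, addresses illustrative):

| /* after setup_per_cpu_areas():                                    */
| /*   per_cpu(kstat, 0).irqs -> CPU0's slice at the tail of block 0 */
| /*   per_cpu(kstat, 1).irqs -> CPU1's slice at the tail of block 1 */
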
asmlinkage void __init start_kernel(void)
{
char * command_line;