From: Vivek Goyal
Date: Sat, 25 Jun 2005 21:58:19 +0000 (-0700)
Subject: [PATCH] kdump: Routines for copying dump pages
X-Git-Tag: v2.6.13-rc1~68^2~95
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=60e64d46a58236e3c718074372cab6a5b56a3b15;p=linux-2.6-omap-h63xx.git

[PATCH] kdump: Routines for copying dump pages

This patch provides the interfaces necessary to read the dump contents,
treating it as a high memory device.

Signed-off-by: Hariprasad Nellitheertha
Signed-off-by: Eric Biederman
Signed-off-by: Vivek Goyal
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index 4b7aaf99d7e..b6eb4dcb877 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -75,6 +75,24 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	preempt_check_resched();
 }
 
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	inc_preempt_count();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	__flush_tlb_one(vaddr);
+
+	return (void*) vaddr;
+}
+
 struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
index 1df42bf347d..0fd331306b6 100644
--- a/include/asm-i386/highmem.h
+++ b/include/asm-i386/highmem.h
@@ -70,6 +70,7 @@ void *kmap(struct page *page);
 void kunmap(struct page *page);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps() do { } while (0)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
new file mode 100644
index 00000000000..7d983b81742
--- /dev/null
+++ b/include/linux/crash_dump.h
@@ -0,0 +1,13 @@
+#ifndef LINUX_CRASH_DUMP_H
+#define LINUX_CRASH_DUMP_H
+
+#ifdef CONFIG_CRASH_DUMP
+#include <linux/kexec.h>
+#include <linux/smp_lock.h>
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+
+extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
+						unsigned long, int);
+#endif /* CONFIG_CRASH_DUMP */
+#endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 2a7e6c65c88..6bece9280eb 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -28,6 +28,7 @@ static inline void *kmap(struct page *page)
 
 #define kmap_atomic(page, idx)		page_address(page)
 #define kunmap_atomic(addr, idx)	do { } while (0)
+#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 
 #endif /* CONFIG_HIGHMEM */
diff --git a/kernel/Makefile b/kernel/Makefile
index cfc8b0dea95..cb05cd05d23 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_SYSFS) += ksysfs.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_SECCOMP) += seccomp.o
 
 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
new file mode 100644
index 00000000000..5a1e6d5d203
--- /dev/null
+++ b/kernel/crash_dump.c
@@ -0,0 +1,49 @@
+/*
+ * kernel/crash_dump.c - Memory preserving reboot related code.
+ *
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
+ */
+
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/crash_dump.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/*
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+				size_t csize, unsigned long offset, int userbuf)
+{
+	void *page, *vaddr;
+
+	if (!csize)
+		return 0;
+
+	page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+	copy_page(page, vaddr);
+	kunmap_atomic(vaddr, KM_PTE0);
+
+	if (userbuf) {
+		if (copy_to_user(buf, (page + offset), csize)) {
+			kfree(page);
+			return -EFAULT;
+		}
+	} else {
+		memcpy(buf, (page + offset), csize);
+	}
+
+	kfree(page);
+	return csize;
+}
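
For illustration only, and not part of the patch above: a minimal sketch of how a dump reader (for example a /proc/vmcore-style interface) might drive copy_oldmem_page() to copy an arbitrary byte range out of the crashed kernel's memory. The helper name read_oldmem_range() and its signature are hypothetical; the key point is that each call to copy_oldmem_page() handles at most one page, so the request is split at page boundaries.

/*
 * Hypothetical caller (not from the patch): read `count' bytes of old
 * kernel memory starting at byte position *ppos, one page at a time.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crash_dump.h>
#include <asm/page.h>

static ssize_t read_oldmem_range(char *buf, size_t count, u64 *ppos,
				 int userbuf)
{
	unsigned long pfn, offset;
	size_t nr, total = 0;
	ssize_t rc;

	while (count) {
		pfn = (unsigned long)(*ppos >> PAGE_SHIFT);
		offset = (unsigned long)(*ppos & (PAGE_SIZE - 1));

		/* copy_oldmem_page() copies from a single page only,
		 * so never let one request cross a page boundary. */
		nr = min_t(size_t, count, PAGE_SIZE - offset);

		rc = copy_oldmem_page(pfn, buf, nr, offset, userbuf);
		if (rc < 0)
			return rc;

		buf += nr;
		*ppos += nr;
		count -= nr;
		total += nr;
	}
	return total;
}

One design point worth noting in copy_oldmem_page() itself: the old page is first copied into a kmalloc()'d bounce buffer and only then handed to copy_to_user(), because copy_to_user() may fault and sleep, which is not allowed while the atomic fixmap mapping set up by kmap_atomic_pfn() is held.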