4
* Copyright (C) 2006, 2007, 2008, 2009 NEC Corporation
6
* This program is free software; you can redistribute it and/or modify
7
* it under the terms of the GNU General Public License as published by
8
* the Free Software Foundation; either version 2 of the License, or
9
* (at your option) any later version.
11
* This program is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
* GNU General Public License for more details.
16
#include "makedumpfile.h"
18
struct symbol_table symbol_table;
19
struct size_table size_table;
20
struct offset_table offset_table;
21
struct array_table array_table;
22
struct number_table number_table;
23
struct srcfile_table srcfile_table;
25
struct dwarf_info dwarf_info;
26
struct vm_table vt = { 0 };
27
struct DumpInfo *info = NULL;
29
char filename_stdout[] = FILENAME_STDOUT;
33
* Forward declarations
35
void print_progress(const char *msg,
36
unsigned long current,
42
#define PROGRESS_COPY "Copying data"
43
#define PROGRESS_HOLES "Checking for memory holes"
44
#define PROGRESS_UNN_PAGES "Excluding unnecessary pages"
45
#define PROGRESS_FREE_PAGES "Excluding free pages"
46
#define PROGRESS_ZERO_PAGES "Excluding zero pages"
47
#define PROGRESS_XEN_DOMAIN "Excluding xen user domain"
48
#define PROGRESS_MAXLEN "35"
51
* The numbers of the excluded pages
53
unsigned long long pfn_zero;
54
unsigned long long pfn_memhole;
55
unsigned long long pfn_cache;
56
unsigned long long pfn_cache_private;
57
unsigned long long pfn_user;
58
unsigned long long pfn_free;
60
int retcd = FAILED; /* return code */
65
MSG("makedumpfile: version " VERSION " (released on " RELEASE_DATE ")\n");
69
#define INITIALIZE_LONG_TABLE(table, value) \
71
size_member = sizeof(long); \
72
num_member = sizeof(table) / size_member; \
73
ptr_long_table = (long *)&table; \
74
for (i = 0; i < num_member; i++, ptr_long_table++) \
75
*ptr_long_table = value; \
79
initialize_tables(void)
81
int i, size_member, num_member;
82
unsigned long long *ptr_symtable;
86
* Initialize the symbol table.
88
size_member = sizeof(symbol_table.mem_map);
89
num_member = sizeof(symbol_table) / size_member;
91
ptr_symtable = (unsigned long long *)&symbol_table;
93
for (i = 0; i < num_member; i++, ptr_symtable++)
94
*ptr_symtable = NOT_FOUND_SYMBOL;
96
INITIALIZE_LONG_TABLE(size_table, NOT_FOUND_STRUCTURE);
97
INITIALIZE_LONG_TABLE(offset_table, NOT_FOUND_STRUCTURE);
98
INITIALIZE_LONG_TABLE(array_table, NOT_FOUND_STRUCTURE);
99
INITIALIZE_LONG_TABLE(number_table, NOT_FOUND_NUMBER);
103
* Convert Physical Address to File Offset.
104
* If this function returns 0x0, File Offset isn't found.
105
* The File Offset 0x0 is in the ELF header.
106
* It is not in the memory image.
109
paddr_to_offset(unsigned long long paddr)
113
struct pt_load_segment *pls;
115
for (i = offset = 0; i < info->num_load_memory; i++) {
116
pls = &info->pt_load_segments[i];
117
if ((paddr >= pls->phys_start)
118
&& (paddr < pls->phys_end)) {
119
offset = (off_t)(paddr - pls->phys_start) +
128
vaddr_to_paddr_general(unsigned long long vaddr)
131
unsigned long long paddr = NOT_PADDR;
132
struct pt_load_segment *pls;
134
if (info->flag_refiltering)
137
for (i = 0; i < info->num_load_memory; i++) {
138
pls = &info->pt_load_segments[i];
139
if ((vaddr >= pls->virt_start)
140
&& (vaddr < pls->virt_end)) {
141
paddr = (off_t)(vaddr - pls->virt_start) +
150
* This function is slow because it doesn't use the memory.
151
* It is useful at few calls like get_str_osrelease_from_vmlinux().
154
vaddr_to_offset_slow(int fd, char *filename, unsigned long long vaddr)
157
int i, phnum, num_load, flag_elf64, elf_format;
161
elf_format = check_elf_format(fd, filename, &phnum, &num_load);
163
if (elf_format == ELF64)
165
else if (elf_format == ELF32)
170
for (i = 0; i < phnum; i++) {
171
if (flag_elf64) { /* ELF64 */
172
if (!get_elf64_phdr(fd, filename, i, &load64)) {
173
ERRMSG("Can't find Phdr %d.\n", i);
176
if (load64.p_type != PT_LOAD)
179
if ((vaddr < load64.p_vaddr)
180
|| (load64.p_vaddr + load64.p_filesz <= vaddr))
183
offset = load64.p_offset + (vaddr - load64.p_vaddr);
186
if (!get_elf32_phdr(fd, filename, i, &load32)) {
187
ERRMSG("Can't find Phdr %d.\n", i);
190
if (load32.p_type != PT_LOAD)
193
if ((vaddr < load32.p_vaddr)
194
|| (load32.p_vaddr + load32.p_filesz <= vaddr))
197
offset = load32.p_offset + (vaddr - load32.p_vaddr);
206
* Translate a domain-0's physical address to machine address.
209
ptom_xen(unsigned long long paddr)
212
unsigned long long maddr, pfn, mfn_idx, frame_idx;
214
pfn = paddr_to_pfn(paddr);
215
mfn_idx = pfn / MFNS_PER_FRAME;
216
frame_idx = pfn % MFNS_PER_FRAME;
218
if (mfn_idx >= info->p2m_frames) {
219
ERRMSG("Invalid mfn_idx(%llu).\n", mfn_idx);
222
maddr = pfn_to_paddr(info->p2m_mfn_frame_list[mfn_idx])
223
+ sizeof(unsigned long) * frame_idx;
224
if (!readmem(MADDR_XEN, maddr, &mfn, sizeof(mfn))) {
225
ERRMSG("Can't get mfn.\n");
228
maddr = pfn_to_paddr(mfn);
229
maddr |= PAGEOFFSET(paddr);
235
* Get the number of the page descriptors from the ELF info.
241
unsigned long long max_paddr;
242
struct pt_load_segment *pls;
244
if (info->flag_refiltering) {
245
info->max_mapnr = info->dh_memory->max_mapnr;
249
for (i = 0, max_paddr = 0; i < info->num_load_memory; i++) {
250
pls = &info->pt_load_segments[i];
251
if (max_paddr < pls->phys_end)
252
max_paddr = pls->phys_end;
254
info->max_mapnr = paddr_to_pfn(max_paddr);
257
DEBUG_MSG("max_mapnr : %llx\n", info->max_mapnr);
263
* Get the number of the page descriptors for Xen.
268
unsigned long max_pfn;
270
if (SYMBOL(max_pfn) == NOT_FOUND_SYMBOL)
273
if (!readmem(VADDR, SYMBOL(max_pfn), &max_pfn, sizeof max_pfn))
276
info->dom0_mapnr = max_pfn;
282
is_in_same_page(unsigned long vaddr1, unsigned long vaddr2)
284
if (round(vaddr1, info->page_size) == round(vaddr2, info->page_size))
290
#define BITMAP_SECT_LEN 4096
291
static inline int is_dumpable(struct dump_bitmap *, unsigned long long);
293
pfn_to_pos(unsigned long long pfn)
295
unsigned long desc_pos, i;
297
desc_pos = info->valid_pages[pfn / BITMAP_SECT_LEN];
298
for (i = round(pfn, BITMAP_SECT_LEN); i < pfn; i++)
299
if (is_dumpable(info->bitmap_memory, i))
306
read_page_desc(unsigned long long paddr, page_desc_t *pd)
308
struct disk_dump_header *dh;
309
unsigned long desc_pos;
310
unsigned long long pfn;
314
* Find page descriptor
316
dh = info->dh_memory;
318
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size + dh->bitmap_blocks)
320
pfn = paddr_to_pfn(paddr);
321
desc_pos = pfn_to_pos(pfn);
322
offset += (off_t)desc_pos * sizeof(page_desc_t);
323
if (lseek(info->fd_memory, offset, SEEK_SET) < 0) {
324
ERRMSG("Can't seek %s. %s\n",
325
info->name_memory, strerror(errno));
330
* Read page descriptor
332
if (read(info->fd_memory, pd, sizeof(*pd)) != sizeof(*pd)) {
333
ERRMSG("Can't read %s. %s\n",
334
info->name_memory, strerror(errno));
341
if (pd->size > dh->block_size)
348
readpmem_kdump_compressed(unsigned long long paddr, void *bufptr, size_t size)
351
char buf[info->page_size];
352
char buf2[info->page_size];
354
unsigned long retlen, page_offset;
356
page_offset = paddr % info->page_size;
358
if (!is_dumpable(info->bitmap_memory, paddr_to_pfn(paddr))) {
359
ERRMSG("pfn(%llx) is excluded from %s.\n",
360
paddr_to_pfn(paddr), info->name_memory);
364
if (!read_page_desc(paddr, &pd)) {
365
ERRMSG("Can't read page_desc: %llx\n", paddr);
369
if (lseek(info->fd_memory, pd.offset, SEEK_SET) < 0) {
370
ERRMSG("Can't seek %s. %s\n",
371
info->name_memory, strerror(errno));
378
if (read(info->fd_memory, buf, pd.size) != pd.size) {
379
ERRMSG("Can't read %s. %s\n",
380
info->name_memory, strerror(errno));
384
if (pd.flags & DUMP_DH_COMPRESSED) {
385
retlen = info->page_size;
386
ret = uncompress((unsigned char *)buf2, &retlen,
387
(unsigned char *)buf, pd.size);
388
if ((ret != Z_OK) || (retlen != info->page_size)) {
389
ERRMSG("Uncompress failed: %d\n", ret);
392
memcpy(bufptr, buf2 + page_offset, size);
394
memcpy(bufptr, buf + page_offset, size);
398
ERRMSG("type_addr: %d, addr:%llx, size:%zd\n", PADDR, paddr, size);
403
readmem(int type_addr, unsigned long long addr, void *bufptr, size_t size)
405
size_t read_size, next_size;
407
unsigned long long next_addr;
408
unsigned long long paddr, maddr = NOT_PADDR;
410
const off_t failed = (off_t)-1;
414
if ((paddr = vaddr_to_paddr(addr)) == NOT_PADDR) {
415
ERRMSG("Can't convert a virtual address(%llx) to physical address.\n",
419
if (vt.mem_flags & MEMORY_XEN) {
420
if ((maddr = ptom_xen(paddr)) == NOT_PADDR) {
421
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
430
if (vt.mem_flags & MEMORY_XEN) {
431
if ((maddr = ptom_xen(paddr)) == NOT_PADDR) {
432
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
440
if ((paddr = kvtop_xen(addr)) == NOT_PADDR) {
441
ERRMSG("Can't convert a virtual address(%llx) to machine address.\n",
450
ERRMSG("Invalid address type (%d).\n", type_addr);
457
* Read each page, because pages are not necessarily continuous.
458
* Ex) pages in vmalloc area
460
if (!is_in_same_page(addr, addr + size - 1)) {
461
read_size = info->page_size - (addr % info->page_size);
462
next_addr = roundup(addr + 1, info->page_size);
463
next_size = size - read_size;
464
next_ptr = (char *)bufptr + read_size;
466
if (!readmem(type_addr, next_addr, next_ptr, next_size))
470
if (info->flag_refiltering)
471
return readpmem_kdump_compressed(paddr, bufptr, read_size);
473
if (!(offset = paddr_to_offset(paddr))) {
474
ERRMSG("Can't convert a physical address(%llx) to offset.\n",
479
if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
480
ERRMSG("Can't seek the dump memory(%s). %s\n",
481
info->name_memory, strerror(errno));
485
if (read(info->fd_memory, bufptr, read_size) != read_size) {
486
ERRMSG("Can't read the dump memory(%s). %s\n",
487
info->name_memory, strerror(errno));
493
ERRMSG("type_addr: %d, addr:%llx, size:%zd\n", type_addr, addr, size);
498
get_kernel_version(char *release)
505
* This method checks that vmlinux and vmcore are same kernel version.
508
maj = strtol(start, &end, 10);
513
min = strtol(start, &end, 10);
518
rel = strtol(start, &end, 10);
522
version = KERNEL_VERSION(maj, min, rel);
524
if ((version < OLDEST_VERSION) || (LATEST_VERSION < version)) {
525
MSG("The kernel version is not supported.\n");
526
MSG("The created dumpfile may be incomplete.\n");
532
is_page_size(long page_size)
535
* Page size is restricted to a hamming weight of 1.
537
if (page_size > 0 && !(page_size & (page_size - 1)))
544
set_page_size(long page_size)
546
if (!is_page_size(page_size)) {
547
ERRMSG("Invalid page_size: %ld", page_size);
550
info->page_size = page_size;
551
info->page_shift = ffs(info->page_size) - 1;
552
DEBUG_MSG("page_size : %ld\n", info->page_size);
558
fallback_to_current_page_size(void)
561
if (!set_page_size(sysconf(_SC_PAGE_SIZE)))
564
DEBUG_MSG("WARNING: Cannot determine page size (no vmcoreinfo).\n");
565
DEBUG_MSG("Using the dump kernel page size: %ld\n",
574
unsigned long utsname;
577
* Get the kernel version.
579
if (SYMBOL(system_utsname) != NOT_FOUND_SYMBOL) {
580
utsname = SYMBOL(system_utsname);
581
} else if (SYMBOL(init_uts_ns) != NOT_FOUND_SYMBOL) {
582
utsname = SYMBOL(init_uts_ns) + sizeof(int);
584
ERRMSG("Can't get the symbol of system_utsname.\n");
587
if (!readmem(VADDR, utsname, &info->system_utsname,
588
sizeof(struct utsname))) {
589
ERRMSG("Can't get the address of system_utsname.\n");
593
if (info->flag_read_vmcoreinfo) {
594
if (strcmp(info->system_utsname.release, info->release)) {
595
ERRMSG("%s and %s don't match.\n",
596
info->name_vmcoreinfo, info->name_memory);
597
retcd = WRONG_RELEASE;
602
info->kernel_version = get_kernel_version(info->system_utsname.release);
603
if (info->kernel_version == FALSE) {
604
ERRMSG("Can't get the kernel version.\n");
616
MSG(" Creating DUMPFILE:\n");
617
MSG(" # makedumpfile [-c|-E] [-d DL] [-x VMLINUX|-i VMCOREINFO] VMCORE DUMPFILE\n");
619
MSG(" Outputting the dump data in the flattened format to the standard output:\n");
620
MSG(" # makedumpfile -F [-c|-E] [-d DL] [-x VMLINUX|-i VMCOREINFO] VMCORE\n");
622
MSG(" Rearranging the dump data in the flattened format to a readable DUMPFILE:\n");
623
MSG(" # makedumpfile -R DUMPFILE\n");
625
MSG(" Generating VMCOREINFO:\n");
626
MSG(" # makedumpfile -g VMCOREINFO -x VMLINUX\n");
629
MSG(" Creating DUMPFILE of Xen:\n");
630
MSG(" # makedumpfile -E [--xen-syms XEN-SYMS|--xen-vmcoreinfo VMCOREINFO] VMCORE DUMPFILE\n");
632
MSG(" Generating VMCOREINFO of Xen:\n");
633
MSG(" # makedumpfile -g VMCOREINFO --xen-syms XEN-SYMS\n");
636
MSG("Available options:\n");
638
MSG(" Compress dump data by each page.\n");
639
MSG(" A user cannot specify this option with -E option, because the ELF format\n");
640
MSG(" does not support compressed data.\n");
641
MSG(" THIS IS ONLY FOR THE CRASH UTILITY.\n");
644
MSG(" Specify the type of unnecessary page for analysis.\n");
645
MSG(" Pages of the specified type are not copied to DUMPFILE. The page type\n");
646
MSG(" marked in the following table is excluded. A user can specify multiple\n");
647
MSG(" page types by setting the sum of each page type for Dump_Level (DL).\n");
648
MSG(" The maximum of Dump_Level is 31.\n");
649
MSG(" Note that Dump_Level for Xen dump filtering is 0 or 1.\n");
651
MSG(" Dump | zero cache cache user free\n");
652
MSG(" Level | page page private data page\n");
653
MSG(" -------+---------------------------------------\n");
660
MSG(" 31 | X X X X X\n");
663
MSG(" Create DUMPFILE in the ELF format.\n");
664
MSG(" This option cannot be specified with -c option, because the ELF\n");
665
MSG(" format does not support compressed data.\n");
667
MSG(" [-x VMLINUX]:\n");
668
MSG(" Specify the first kernel's VMLINUX to analyze the first kernel's\n");
669
MSG(" memory usage.\n");
670
MSG(" The page size of the first kernel and the second kernel should match.\n");
672
MSG(" [-i VMCOREINFO]:\n");
673
MSG(" Specify VMCOREINFO instead of VMLINUX for analyzing the first kernel's\n");
674
MSG(" memory usage.\n");
675
MSG(" VMCOREINFO should be made beforehand by makedumpfile with -g option,\n");
676
MSG(" and it contains the first kernel's information. If Dump_Level is 2 or\n");
677
MSG(" more and [-x VMLINUX] is not specified, this option is necessary.\n");
679
MSG(" [-g VMCOREINFO]:\n");
680
MSG(" Generate VMCOREINFO from the first kernel's VMLINUX.\n");
681
MSG(" VMCOREINFO must be generated on the system that is running the first\n");
682
MSG(" kernel. With -i option, a user can specify VMCOREINFO generated on the\n");
683
MSG(" other system that is running the same first kernel. [-x VMLINUX] must\n");
684
MSG(" be specified.\n");
687
MSG(" Output the dump data in the flattened format to the standard output\n");
688
MSG(" for transporting the dump data by SSH.\n");
689
MSG(" Analysis tools cannot read the flattened format directly. For analysis,\n");
690
MSG(" the dump data in the flattened format should be rearranged to a readable\n");
691
MSG(" DUMPFILE by -R option.\n");
694
MSG(" Rearrange the dump data in the flattened format from the standard input\n");
695
MSG(" to a readable DUMPFILE.\n");
697
MSG(" [--xen-syms XEN-SYMS]:\n");
698
MSG(" Specify the XEN-SYMS to analyze Xen's memory usage.\n");
700
MSG(" [--xen-vmcoreinfo VMCOREINFO]:\n");
701
MSG(" Specify the VMCOREINFO of Xen to analyze Xen's memory usage.\n");
703
MSG(" [--xen_phys_start XEN_PHYS_START_ADDRESS]:\n");
704
MSG(" This option is only for x86_64.\n");
705
MSG(" Specify the XEN_PHYS_START_ADDRESS, if the xen code/data is relocatable\n");
706
MSG(" and VMCORE does not contain XEN_PHYS_START_ADDRESS in the CRASHINFO.\n");
709
MSG(" Exclude all the user domain pages from Xen kdump's VMCORE, and extract\n");
710
MSG(" the part of Xen and domain-0.\n");
712
MSG(" [--message-level ML]:\n");
713
MSG(" Specify the message types.\n");
714
MSG(" Users can restrict output printed by specifying Message_Level (ML) with\n");
715
MSG(" this option. The message type marked with an X in the following table is\n");
716
MSG(" printed. For example, according to the table, specifying 7 as ML means\n");
717
MSG(" progress indicator, common message, and error message are printed, and\n");
718
MSG(" this is a default value.\n");
719
MSG(" Note that the maximum value of message_level is 31.\n");
721
MSG(" Message | progress common error debug report\n");
722
MSG(" Level | indicator message message message message\n");
723
MSG(" ---------+------------------------------------------------------\n");
728
MSG(" * 7 | X X X\n");
731
MSG(" 31 | X X X X X\n");
733
MSG(" [--vtop VIRTUAL_ADDRESS]:\n");
734
MSG(" This option is useful, when user debugs the translation problem\n");
735
MSG(" of virtual address. If specifing the VIRTUAL_ADDRESS, its physical\n");
736
MSG(" address is printed.\n");
739
MSG(" Print debugging message.\n");
742
MSG(" Overwrite DUMPFILE even if it already exists.\n");
745
MSG(" Show help message.\n");
748
MSG(" Show the version of makedumpfile.\n");
751
MSG(" This is a pathname to the first kernel's vmlinux.\n");
752
MSG(" This file must have the debug information of the first kernel to analyze\n");
753
MSG(" the first kernel's memory usage.\n");
756
MSG(" This is a pathname to the first kernel's memory core image.\n");
757
MSG(" This argument is generally /proc/vmcore.\n");
760
MSG(" This is a pathname to a file created by this command.\n");
763
MSG(" This is a pathname to the xen-syms.\n");
764
MSG(" This file must have the debug information of Xen to analyze\n");
765
MSG(" Xen's memory usage.\n");
770
open_vmcoreinfo(char *mode)
772
FILE *file_vmcoreinfo;
774
if ((file_vmcoreinfo = fopen(info->name_vmcoreinfo, mode)) == NULL) {
775
ERRMSG("Can't open the vmcoreinfo file(%s). %s\n",
776
info->name_vmcoreinfo, strerror(errno));
779
info->file_vmcoreinfo = file_vmcoreinfo;
784
open_kernel_file(void)
788
if (info->name_vmlinux) {
789
if ((fd = open(info->name_vmlinux, O_RDONLY)) < 0) {
790
ERRMSG("Can't open the kernel file(%s). %s\n",
791
info->name_vmlinux, strerror(errno));
794
info->fd_vmlinux = fd;
796
if (info->name_xen_syms) {
797
if ((fd = open(info->name_xen_syms, O_RDONLY)) < 0) {
798
ERRMSG("Can't open the kernel file(%s). %s\n",
799
info->name_xen_syms, strerror(errno));
802
info->fd_xen_syms = fd;
808
check_kdump_compressed(char *filename)
810
struct disk_dump_header dh;
812
if (!__read_disk_dump_header(&dh, filename))
815
if (strncmp(dh.signature, KDUMP_SIGNATURE, SIG_LEN))
822
get_kdump_compressed_header_info(char *filename)
824
struct disk_dump_header dh;
825
struct kdump_sub_header kh;
827
if (!read_disk_dump_header(&dh, filename))
830
if (!read_kdump_sub_header(&kh, filename))
833
if (dh.header_version < 1) {
834
ERRMSG("header does not have dump_level member\n");
837
DEBUG_MSG("diskdump main header\n");
838
DEBUG_MSG(" signature : %s\n", dh.signature);
839
DEBUG_MSG(" header_version : %d\n", dh.header_version);
840
DEBUG_MSG(" status : %d\n", dh.status);
841
DEBUG_MSG(" block_size : %d\n", dh.block_size);
842
DEBUG_MSG(" sub_hdr_size : %d\n", dh.sub_hdr_size);
843
DEBUG_MSG(" bitmap_blocks : %d\n", dh.bitmap_blocks);
844
DEBUG_MSG(" max_mapnr : 0x%x\n", dh.max_mapnr);
845
DEBUG_MSG(" total_ram_blocks : %d\n", dh.total_ram_blocks);
846
DEBUG_MSG(" device_blocks : %d\n", dh.device_blocks);
847
DEBUG_MSG(" written_blocks : %d\n", dh.written_blocks);
848
DEBUG_MSG(" current_cpu : %d\n", dh.current_cpu);
849
DEBUG_MSG(" nr_cpus : %d\n", dh.nr_cpus);
850
DEBUG_MSG("kdump sub header\n");
851
DEBUG_MSG(" phys_base : 0x%lx\n", kh.phys_base);
852
DEBUG_MSG(" dump_level : %d\n", kh.dump_level);
853
DEBUG_MSG(" split : %d\n", kh.split);
854
DEBUG_MSG(" start_pfn : 0x%lx\n", kh.start_pfn);
855
DEBUG_MSG(" end_pfn : 0x%lx\n", kh.end_pfn);
857
info->dh_memory = malloc(sizeof(dh));
858
if (info->dh_memory == NULL) {
859
ERRMSG("Can't allocate memory for the header. %s\n",
863
memcpy(info->dh_memory, &dh, sizeof(dh));
864
memcpy(&info->timestamp, &dh.timestamp, sizeof(dh.timestamp));
866
info->kh_memory = malloc(sizeof(kh));
867
if (info->kh_memory == NULL) {
868
ERRMSG("Can't allocate memory for the sub header. %s\n",
872
memcpy(info->kh_memory, &kh, sizeof(kh));
874
if (dh.header_version >= 3) {
875
/* A dumpfile contains vmcoreinfo data. */
876
info->offset_vmcoreinfo = kh.offset_vmcoreinfo;
877
info->size_vmcoreinfo = kh.size_vmcoreinfo;
881
free(info->dh_memory);
882
info->dh_memory = NULL;
888
open_dump_memory(void)
892
if ((fd = open(info->name_memory, O_RDONLY)) < 0) {
893
ERRMSG("Can't open the dump memory(%s). %s\n",
894
info->name_memory, strerror(errno));
897
info->fd_memory = fd;
899
status = check_kdump_compressed(info->name_memory);
900
if (status == TRUE) {
901
info->flag_refiltering = TRUE;
902
return get_kdump_compressed_header_info(info->name_memory);
903
} else if (status == FALSE) {
914
int open_flags = O_RDWR|O_CREAT|O_TRUNC;
916
if (!info->flag_force)
917
open_flags |= O_EXCL;
919
if (info->flag_flatten) {
921
info->name_dumpfile = filename_stdout;
922
} else if ((fd = open(info->name_dumpfile, open_flags,
923
S_IRUSR|S_IWUSR)) < 0) {
924
ERRMSG("Can't open the dump file(%s). %s\n",
925
info->name_dumpfile, strerror(errno));
928
info->fd_dumpfile = fd;
933
open_dump_bitmap(void)
937
if ((info->name_bitmap
938
= (char *)malloc(sizeof(FILENAME_BITMAP))) == NULL) {
939
ERRMSG("Can't allocate memory for the filename. %s\n",
943
strcpy(info->name_bitmap, FILENAME_BITMAP);
944
if ((fd = mkstemp(info->name_bitmap)) < 0) {
945
ERRMSG("Can't open the bitmap file(%s). %s\n",
946
info->name_bitmap, strerror(errno));
949
info->fd_bitmap = fd;
951
if (info->flag_split) {
953
* Reserve file descriptors of bitmap for creating split
954
* dumpfiles by multiple processes, because a bitmap file will
955
* be unlinked just after this and it is not possible to open
956
* a bitmap file later.
958
for (i = 0; i < info->num_dumpfile; i++) {
959
if ((fd = open(info->name_bitmap, O_RDONLY)) < 0) {
960
ERRMSG("Can't open the bitmap file(%s). %s\n",
961
info->name_bitmap, strerror(errno));
964
SPLITTING_FD_BITMAP(i) = fd;
967
unlink(info->name_bitmap);
973
* Open the following files when it generates the vmcoreinfo file.
978
open_files_for_generating_vmcoreinfo(void)
980
if (!open_kernel_file())
983
if (!open_vmcoreinfo("w"))
990
* Open the following file when it rearranges the dump data.
994
open_files_for_rearranging_dumpdata(void)
996
if (!open_dump_file())
1003
* Open the following files when it creates the dump file.
1007
* if it reads the vmcoreinfo file
1013
open_files_for_creating_dumpfile(void)
1015
if (info->flag_read_vmcoreinfo) {
1016
if (!open_vmcoreinfo("r"))
1019
if (!open_kernel_file())
1022
if (!open_dump_memory())
1025
if (!open_dump_bitmap())
1032
dump_Elf_load(Elf64_Phdr *prog, int num_load)
1034
struct pt_load_segment *pls;
1036
if (prog->p_type != PT_LOAD) {
1037
ERRMSG("%s isn't the dump memory.\n", info->name_memory);
1041
pls = &info->pt_load_segments[num_load];
1042
pls->phys_start = prog->p_paddr;
1043
pls->phys_end = pls->phys_start + prog->p_filesz;
1044
pls->virt_start = prog->p_vaddr;
1045
pls->virt_end = pls->virt_start + prog->p_filesz;
1046
pls->file_offset = prog->p_offset;
1048
DEBUG_MSG("LOAD (%d)\n", num_load);
1049
DEBUG_MSG(" phys_start : %llx\n", pls->phys_start);
1050
DEBUG_MSG(" phys_end : %llx\n", pls->phys_end);
1051
DEBUG_MSG(" virt_start : %llx\n", pls->virt_start);
1052
DEBUG_MSG(" virt_end : %llx\n", pls->virt_end);
1058
get_elf64_ehdr(Elf64_Ehdr *ehdr)
1060
const off_t failed = (off_t)-1;
1062
if (lseek(info->fd_memory, 0, SEEK_SET) == failed) {
1063
ERRMSG("Can't seek the dump memory(%s). %s\n",
1064
info->name_memory, strerror(errno));
1067
if (read(info->fd_memory, ehdr, sizeof(Elf64_Ehdr))
1068
!= sizeof(Elf64_Ehdr)) {
1069
ERRMSG("Can't read the dump memory(%s). %s\n",
1070
info->name_memory, strerror(errno));
1073
if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
1074
ERRMSG("Can't get valid e_ident.\n");
1081
get_elf64_phdr(int fd, char *filename, int index, Elf64_Phdr *phdr)
1084
const off_t failed = (off_t)-1;
1086
offset = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * index;
1088
if (lseek(fd, offset, SEEK_SET) == failed) {
1089
ERRMSG("Can't seek %s. %s\n", filename, strerror(errno));
1092
if (read(fd, phdr, sizeof(Elf64_Phdr)) != sizeof(Elf64_Phdr)) {
1093
ERRMSG("Can't read %s. %s\n", filename, strerror(errno));
1100
get_elf32_ehdr(Elf32_Ehdr *ehdr)
1102
const off_t failed = (off_t)-1;
1104
if (lseek(info->fd_memory, 0, SEEK_SET) == failed) {
1105
ERRMSG("Can't seek the dump memory(%s). %s\n",
1106
info->name_memory, strerror(errno));
1109
if (read(info->fd_memory, ehdr, sizeof(Elf32_Ehdr))
1110
!= sizeof(Elf32_Ehdr)) {
1111
ERRMSG("Can't read the dump memory(%s). %s\n",
1112
info->name_memory, strerror(errno));
1115
if (ehdr->e_ident[EI_CLASS] != ELFCLASS32) {
1116
ERRMSG("Can't get valid e_ident.\n");
1123
get_elf32_phdr(int fd, char *filename, int index, Elf32_Phdr *phdr)
1126
const off_t failed = (off_t)-1;
1128
offset = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * index;
1130
if (lseek(fd, offset, SEEK_SET) == failed) {
1131
ERRMSG("Can't seek %s. %s\n", filename, strerror(errno));
1134
if (read(fd, phdr, sizeof(Elf32_Phdr)) != sizeof(Elf32_Phdr)) {
1135
ERRMSG("Can't read %s. %s\n", filename, strerror(errno));
1142
get_elf_phdr_memory(int index, Elf64_Phdr *phdr)
1146
if (info->flag_elf64_memory) { /* ELF64 */
1147
if (!get_elf64_phdr(info->fd_memory, info->name_memory,
1149
ERRMSG("Can't find Phdr %d.\n", index);
1153
if (!get_elf32_phdr(info->fd_memory, info->name_memory,
1155
ERRMSG("Can't find Phdr %d.\n", index);
1158
memset(phdr, 0, sizeof(Elf64_Phdr));
1159
phdr->p_type = phdr32.p_type;
1160
phdr->p_flags = phdr32.p_flags;
1161
phdr->p_offset = phdr32.p_offset;
1162
phdr->p_vaddr = phdr32.p_vaddr;
1163
phdr->p_paddr = phdr32.p_paddr;
1164
phdr->p_filesz = phdr32.p_filesz;
1165
phdr->p_memsz = phdr32.p_memsz;
1166
phdr->p_align = phdr32.p_align;
1172
check_elf_format(int fd, char *filename, int *phnum, int *num_load)
1179
const off_t failed = (off_t)-1;
1181
if (lseek(fd, 0, SEEK_SET) == failed) {
1182
ERRMSG("Can't seek %s. %s\n", filename, strerror(errno));
1185
if (read(fd, &ehdr64, sizeof(Elf64_Ehdr)) != sizeof(Elf64_Ehdr)) {
1186
ERRMSG("Can't read %s. %s\n", filename, strerror(errno));
1189
if (lseek(fd, 0, SEEK_SET) == failed) {
1190
ERRMSG("Can't seek %s. %s\n", filename, strerror(errno));
1193
if (read(fd, &ehdr32, sizeof(Elf32_Ehdr)) != sizeof(Elf32_Ehdr)) {
1194
ERRMSG("Can't read %s. %s\n", filename, strerror(errno));
1198
if ((ehdr64.e_ident[EI_CLASS] == ELFCLASS64)
1199
&& (ehdr32.e_ident[EI_CLASS] != ELFCLASS32)) {
1200
(*phnum) = ehdr64.e_phnum;
1201
for (i = 0; i < ehdr64.e_phnum; i++) {
1202
if (!get_elf64_phdr(fd, filename, i, &load64)) {
1203
ERRMSG("Can't find Phdr %d.\n", i);
1206
if (load64.p_type == PT_LOAD)
1211
} else if ((ehdr64.e_ident[EI_CLASS] != ELFCLASS64)
1212
&& (ehdr32.e_ident[EI_CLASS] == ELFCLASS32)) {
1213
(*phnum) = ehdr32.e_phnum;
1214
for (i = 0; i < ehdr32.e_phnum; i++) {
1215
if (!get_elf32_phdr(fd, filename, i, &load32)) {
1216
ERRMSG("Can't find Phdr %d.\n", i);
1219
if (load32.p_type == PT_LOAD)
1224
ERRMSG("Can't get valid ehdr.\n");
1231
int i, j, phnum, num_load, elf_format;
1233
unsigned long size_note;
1237
* Check ELF64 or ELF32.
1239
elf_format = check_elf_format(info->fd_memory, info->name_memory,
1242
if (elf_format == ELF64)
1243
info->flag_elf64_memory = TRUE;
1244
else if (elf_format == ELF32)
1245
info->flag_elf64_memory = FALSE;
1249
info->num_load_memory = num_load;
1251
if (!info->num_load_memory) {
1252
ERRMSG("Can't get the number of PT_LOAD.\n");
1255
if ((info->pt_load_segments = (struct pt_load_segment *)
1256
calloc(1, sizeof(struct pt_load_segment) *
1257
info->num_load_memory)) == NULL) {
1258
ERRMSG("Can't allocate memory for the PT_LOAD. %s\n",
1264
for (i = 0, j = 0; i < phnum; i++) {
1265
if (!get_elf_phdr_memory(i, &phdr))
1268
if (phdr.p_type == PT_NOTE) {
1269
offset_note = phdr.p_offset;
1270
size_note = phdr.p_filesz;
1272
if (phdr.p_type != PT_LOAD)
1276
info->offset_load_memory = phdr.p_offset;
1277
if (!info->offset_load_memory) {
1278
ERRMSG("Can't get the offset of page data.\n");
1282
if (j >= info->num_load_memory)
1284
if(!dump_Elf_load(&phdr, j))
1288
if (offset_note == 0 || size_note == 0) {
1289
ERRMSG("Can't find PT_NOTE Phdr.\n");
1292
if (!get_pt_note_info(offset_note, size_note)) {
1293
ERRMSG("Can't get PT_NOTE information.\n");
1301
get_symbol_addr(char *symname)
1304
unsigned long long symbol = NOT_FOUND_SYMBOL;
1308
Elf_Data *data = NULL;
1309
Elf_Scn *scn = NULL;
1310
char *sym_name = NULL;
1311
const off_t failed = (off_t)-1;
1313
if (lseek(dwarf_info.fd_debuginfo, 0, SEEK_SET) == failed) {
1314
ERRMSG("Can't seek the kernel file(%s). %s\n",
1315
dwarf_info.name_debuginfo, strerror(errno));
1316
return NOT_FOUND_SYMBOL;
1318
if (!(elfd = elf_begin(dwarf_info.fd_debuginfo, ELF_C_READ, NULL))) {
1319
ERRMSG("Can't get first elf header of %s.\n",
1320
dwarf_info.name_debuginfo);
1321
return NOT_FOUND_SYMBOL;
1323
while ((scn = elf_nextscn(elfd, scn)) != NULL) {
1324
if (gelf_getshdr(scn, &shdr) == NULL) {
1325
ERRMSG("Can't get section header.\n");
1328
if (shdr.sh_type == SHT_SYMTAB)
1332
ERRMSG("Can't find symbol table.\n");
1336
data = elf_getdata(scn, data);
1338
if ((!data) || (data->d_size == 0)) {
1339
ERRMSG("No data in symbol table.\n");
1343
for (i = 0; i < (shdr.sh_size/shdr.sh_entsize); i++) {
1344
if (gelf_getsym(data, i, &sym) == NULL) {
1345
ERRMSG("Can't get symbol at index %d.\n", i);
1348
sym_name = elf_strptr(elfd, shdr.sh_link, sym.st_name);
1350
if (sym_name == NULL)
1353
if (!strcmp(sym_name, symname)) {
1354
symbol = sym.st_value;
1366
get_next_symbol_addr(char *symname)
1369
unsigned long symbol = NOT_FOUND_SYMBOL;
1370
unsigned long next_symbol = NOT_FOUND_SYMBOL;
1374
Elf_Data *data = NULL;
1375
Elf_Scn *scn = NULL;
1376
char *sym_name = NULL;
1377
const off_t failed = (off_t)-1;
1379
if (lseek(dwarf_info.fd_debuginfo, 0, SEEK_SET) == failed) {
1380
ERRMSG("Can't seek the kernel file(%s). %s\n",
1381
dwarf_info.name_debuginfo, strerror(errno));
1382
return NOT_FOUND_SYMBOL;
1384
if (!(elfd = elf_begin(dwarf_info.fd_debuginfo, ELF_C_READ, NULL))) {
1385
ERRMSG("Can't get first elf header of %s.\n",
1386
dwarf_info.name_debuginfo);
1387
return NOT_FOUND_SYMBOL;
1389
while ((scn = elf_nextscn(elfd, scn)) != NULL) {
1390
if (gelf_getshdr(scn, &shdr) == NULL) {
1391
ERRMSG("Can't get section header.\n");
1394
if (shdr.sh_type == SHT_SYMTAB)
1398
ERRMSG("Can't find symbol table.\n");
1402
data = elf_getdata(scn, data);
1404
if ((!data) || (data->d_size == 0)) {
1405
ERRMSG("No data in symbol table.\n");
1409
for (i = 0; i < (shdr.sh_size/shdr.sh_entsize); i++) {
1410
if (gelf_getsym(data, i, &sym) == NULL) {
1411
ERRMSG("Can't get symbol at index %d.\n", i);
1414
sym_name = elf_strptr(elfd, shdr.sh_link, sym.st_name);
1416
if (sym_name == NULL)
1419
if (!strcmp(sym_name, symname)) {
1420
symbol = sym.st_value;
1425
if (symbol == NOT_FOUND_SYMBOL)
1429
* Search for next symbol.
1431
for (i = 0; i < (shdr.sh_size/shdr.sh_entsize); i++) {
1432
if (gelf_getsym(data, i, &sym) == NULL) {
1433
ERRMSG("Can't get symbol at index %d.\n", i);
1436
sym_name = elf_strptr(elfd, shdr.sh_link, sym.st_name);
1438
if (sym_name == NULL)
1441
if (symbol < sym.st_value) {
1442
if (next_symbol == NOT_FOUND_SYMBOL)
1443
next_symbol = sym.st_value;
1445
else if (sym.st_value < next_symbol)
1446
next_symbol = sym.st_value;
1457
is_kvaddr(unsigned long long addr)
1459
return (addr >= (unsigned long long)(KVBASE));
1463
get_data_member_location(Dwarf_Die *die, long *offset)
1466
Dwarf_Attribute attr;
1469
if (dwarf_attr(die, DW_AT_data_member_location, &attr) == NULL)
1472
if (dwarf_getlocation(&attr, &expr, &expcnt) < 0)
1475
(*offset) = expr[0].number;
1481
get_die_type(Dwarf *dwarfd, Dwarf_Die *die, Dwarf_Die *die_type)
1483
Dwarf_Attribute attr;
1484
Dwarf_Off offset_type, offset_cu;
1486
offset_cu = dwarf_dieoffset(die) - dwarf_cuoffset(die);
1489
* Get the offset of DW_AT_type.
1491
if (dwarf_attr(die, DW_AT_type, &attr) == NULL)
1494
if (dwarf_formref(&attr, &offset_type) < 0)
1497
if (dwarf_offdie(dwarfd, offset_type + offset_cu, die_type) == NULL) {
1498
ERRMSG("Can't get CU die.\n");
1505
get_data_array_length(Dwarf *dwarfd, Dwarf_Die *die)
1508
Dwarf_Attribute attr;
1510
Dwarf_Word upper_bound;
1512
if (!get_die_type(dwarfd, die, &die_type)) {
1513
ERRMSG("Can't get CU die of DW_AT_type.\n");
1516
tag = dwarf_tag(&die_type);
1517
if (tag != DW_TAG_array_type) {
1519
* This kernel doesn't have the member of array.
1525
* Get the demanded array length.
1527
dwarf_child(&die_type, &die_type);
1529
tag = dwarf_tag(&die_type);
1530
if (tag == DW_TAG_subrange_type)
1532
} while (dwarf_siblingof(&die_type, &die_type));
1534
if (tag != DW_TAG_subrange_type)
1537
if (dwarf_attr(&die_type, DW_AT_upper_bound, &attr) == NULL)
1540
if (dwarf_formudata(&attr, &upper_bound) < 0)
1543
if (upper_bound < 0)
1546
dwarf_info.array_length = upper_bound + 1;
1552
check_array_type(Dwarf *dwarfd, Dwarf_Die *die)
1557
if (!get_die_type(dwarfd, die, &die_type)) {
1558
ERRMSG("Can't get CU die of DW_AT_type.\n");
1561
tag = dwarf_tag(&die_type);
1562
if (tag == DW_TAG_array_type)
1563
dwarf_info.array_length = FOUND_ARRAY_TYPE;
1569
* Function for searching struct page.union.struct.mapping.
1572
__search_mapping(Dwarf *dwarfd, Dwarf_Die *die, long *offset)
1576
Dwarf_Die child, *walker;
1578
if (dwarf_child(die, &child) != 0)
1583
tag = dwarf_tag(walker);
1584
name = dwarf_diename(walker);
1586
if (tag != DW_TAG_member)
1588
if ((!name) || strcmp(name, dwarf_info.member_name))
1590
if (!get_data_member_location(walker, offset))
1594
} while (!dwarf_siblingof(walker, walker));
1600
* Function for searching struct page.union.struct.
1603
search_mapping(Dwarf *dwarfd, Dwarf_Die *die, long *offset)
1605
Dwarf_Die child, *walker;
1606
Dwarf_Die die_struct;
1608
if (dwarf_child(die, &child) != 0)
1614
if (dwarf_tag(walker) != DW_TAG_member)
1616
if (!get_die_type(dwarfd, walker, &die_struct))
1618
if (dwarf_tag(&die_struct) != DW_TAG_structure_type)
1620
if (__search_mapping(dwarfd, &die_struct, offset))
1622
} while (!dwarf_siblingof(walker, walker));
1628
search_member(Dwarf *dwarfd, Dwarf_Die *die)
1631
long offset, offset_union;
1633
Dwarf_Die child, *walker, die_union;
1635
if (dwarf_child(die, &child) != 0)
1641
tag = dwarf_tag(walker);
1642
name = dwarf_diename(walker);
1644
if (tag != DW_TAG_member)
1647
switch (dwarf_info.cmd) {
1648
case DWARF_INFO_GET_MEMBER_OFFSET:
1649
if ((!name) || strcmp(name, dwarf_info.member_name))
1652
* Get the member offset.
1654
if (!get_data_member_location(walker, &offset))
1656
dwarf_info.member_offset = offset;
1658
case DWARF_INFO_GET_MEMBER_OFFSET_IN_UNION:
1659
if (!get_die_type(dwarfd, walker, &die_union))
1661
if (dwarf_tag(&die_union) != DW_TAG_union_type)
1664
* Search page.mapping in union.
1666
if (!search_mapping(dwarfd, &die_union, &offset_union))
1669
* Get the member offset.
1671
if (!get_data_member_location(walker, &offset))
1673
dwarf_info.member_offset = offset + offset_union;
1675
case DWARF_INFO_GET_MEMBER_OFFSET_1ST_UNION:
1676
if (!get_die_type(dwarfd, walker, &die_union))
1678
if (dwarf_tag(&die_union) != DW_TAG_union_type)
1681
* Get the member offset.
1683
if (!get_data_member_location(walker, &offset))
1685
dwarf_info.member_offset = offset;
1687
case DWARF_INFO_GET_MEMBER_ARRAY_LENGTH:
1688
if ((!name) || strcmp(name, dwarf_info.member_name))
1691
* Get the member length.
1693
if (!get_data_array_length(dwarfd, walker))
1697
} while (!dwarf_siblingof(walker, walker));
1700
* Return even if not found.
1706
is_search_structure(int cmd)
1708
if ((cmd == DWARF_INFO_GET_STRUCT_SIZE)
1709
|| (cmd == DWARF_INFO_GET_MEMBER_OFFSET)
1710
|| (cmd == DWARF_INFO_GET_MEMBER_OFFSET_IN_UNION)
1711
|| (cmd == DWARF_INFO_GET_MEMBER_OFFSET_1ST_UNION)
1712
|| (cmd == DWARF_INFO_GET_MEMBER_ARRAY_LENGTH))
1719
is_search_number(int cmd)
1721
if (cmd == DWARF_INFO_GET_ENUM_NUMBER)
1728
is_search_symbol(int cmd)
1730
if ((cmd == DWARF_INFO_GET_SYMBOL_ARRAY_LENGTH)
1731
|| (cmd == DWARF_INFO_CHECK_SYMBOL_ARRAY_TYPE))
1738
is_search_typedef(int cmd)
1740
if ((cmd == DWARF_INFO_GET_TYPEDEF_SIZE)
1741
|| (cmd == DWARF_INFO_GET_TYPEDEF_SRCNAME))
1748
search_structure(Dwarf *dwarfd, Dwarf_Die *die, int *found)
1754
* If we get to here then we don't have any more
1755
* children, check to see if this is a relevant tag
1758
tag = dwarf_tag(die);
1759
name = dwarf_diename(die);
1760
if ((tag != DW_TAG_structure_type) || (!name)
1761
|| strcmp(name, dwarf_info.struct_name))
1764
* Skip if DW_AT_byte_size is not included.
1766
dwarf_info.struct_size = dwarf_bytesize(die);
1768
if (dwarf_info.struct_size > 0)
1771
} while (!dwarf_siblingof(die, die));
1773
if (dwarf_info.struct_size <= 0) {
1775
* Not found the demanded structure.
1781
* Found the demanded structure.
1784
switch (dwarf_info.cmd) {
1785
case DWARF_INFO_GET_STRUCT_SIZE:
1787
case DWARF_INFO_GET_MEMBER_OFFSET:
1788
case DWARF_INFO_GET_MEMBER_OFFSET_IN_UNION:
1789
case DWARF_INFO_GET_MEMBER_OFFSET_1ST_UNION:
1790
case DWARF_INFO_GET_MEMBER_ARRAY_LENGTH:
1791
search_member(dwarfd, die);
1797
search_number(Dwarf *dwarfd, Dwarf_Die *die, int *found)
1800
Dwarf_Word const_value;
1801
Dwarf_Attribute attr;
1802
Dwarf_Die child, *walker;
1806
tag = dwarf_tag(die);
1807
if (tag != DW_TAG_enumeration_type)
1810
if (dwarf_child(die, &child) != 0)
1816
tag = dwarf_tag(walker);
1817
name = dwarf_diename(walker);
1819
if ((tag != DW_TAG_enumerator) || (!name)
1820
|| strcmp(name, dwarf_info.enum_name))
1823
if (!dwarf_attr(walker, DW_AT_const_value, &attr))
1826
if (dwarf_formudata(&attr, &const_value) < 0)
1830
dwarf_info.enum_number = (long)const_value;
1832
} while (!dwarf_siblingof(walker, walker));
1834
} while (!dwarf_siblingof(die, die));
1838
search_typedef(Dwarf *dwarfd, Dwarf_Die *die, int *found)
1841
char *src_name = NULL;
1846
* If we get to here then we don't have any more
1847
* children, check to see if this is a relevant tag
1850
tag = dwarf_tag(die);
1851
name = dwarf_diename(die);
1853
if ((tag != DW_TAG_typedef) || (!name)
1854
|| strcmp(name, dwarf_info.struct_name))
1857
if (dwarf_info.cmd == DWARF_INFO_GET_TYPEDEF_SIZE) {
1858
if (!get_die_type(dwarfd, die, &die_type)) {
1859
ERRMSG("Can't get CU die of DW_AT_type.\n");
1862
dwarf_info.struct_size = dwarf_bytesize(&die_type);
1863
if (dwarf_info.struct_size <= 0)
1868
} else if (dwarf_info.cmd == DWARF_INFO_GET_TYPEDEF_SRCNAME) {
1869
src_name = (char *)dwarf_decl_file(die);
1874
strncpy(dwarf_info.src_name, src_name, LEN_SRCFILE);
1877
} while (!dwarf_siblingof(die, die));
1881
search_symbol(Dwarf *dwarfd, Dwarf_Die *die, int *found)
1887
* If we get to here then we don't have any more
1888
* children, check to see if this is a relevant tag
1891
tag = dwarf_tag(die);
1892
name = dwarf_diename(die);
1894
if ((tag == DW_TAG_variable) && (name)
1895
&& !strcmp(name, dwarf_info.symbol_name))
1898
} while (!dwarf_siblingof(die, die));
1900
if ((tag != DW_TAG_variable) || (!name)
1901
|| strcmp(name, dwarf_info.symbol_name)) {
1903
* Not found the demanded symbol.
1909
* Found the demanded symbol.
1912
switch (dwarf_info.cmd) {
1913
case DWARF_INFO_GET_SYMBOL_ARRAY_LENGTH:
1914
get_data_array_length(dwarfd, die);
1916
case DWARF_INFO_CHECK_SYMBOL_ARRAY_TYPE:
1917
check_array_type(dwarfd, die);
1923
search_die_tree(Dwarf *dwarfd, Dwarf_Die *die, int *found)
1928
* start by looking at the children
1930
if (dwarf_child(die, &child) == 0)
1931
search_die_tree(dwarfd, &child, found);
1936
if (is_search_structure(dwarf_info.cmd))
1937
search_structure(dwarfd, die, found);
1939
else if (is_search_number(dwarf_info.cmd))
1940
search_number(dwarfd, die, found);
1942
else if (is_search_symbol(dwarf_info.cmd))
1943
search_symbol(dwarfd, die, found);
1945
else if (is_search_typedef(dwarf_info.cmd))
1946
search_typedef(dwarfd, die, found);
1950
get_debug_info(void)
1954
size_t shstrndx, header_size;
1955
uint8_t address_size, offset_size;
1956
Dwarf *dwarfd = NULL;
1958
Dwarf_Off off = 0, next_off = 0, abbrev_offset = 0;
1959
Elf_Scn *scn = NULL;
1960
GElf_Shdr scnhdr_mem, *scnhdr = NULL;
1962
const off_t failed = (off_t)-1;
1966
if (lseek(dwarf_info.fd_debuginfo, 0, SEEK_SET) == failed) {
1967
ERRMSG("Can't seek the kernel file(%s). %s\n",
1968
dwarf_info.name_debuginfo, strerror(errno));
1971
if (!(elfd = elf_begin(dwarf_info.fd_debuginfo, ELF_C_READ_MMAP, NULL))) {
1972
ERRMSG("Can't get first elf header of %s.\n",
1973
dwarf_info.name_debuginfo);
1976
if (!(dwarfd = dwarf_begin_elf(elfd, DWARF_C_READ, NULL))) {
1977
ERRMSG("Can't create a handle for a new debug session.\n");
1980
if (elf_getshstrndx(elfd, &shstrndx) < 0) {
1981
ERRMSG("Can't get the section index of the string table.\n");
1986
* Search for ".debug_info" section.
1988
while ((scn = elf_nextscn(elfd, scn)) != NULL) {
1989
scnhdr = gelf_getshdr(scn, &scnhdr_mem);
1990
name = elf_strptr(elfd, shstrndx, scnhdr->sh_name);
1991
if (!strcmp(name, ".debug_info"))
1994
if (strcmp(name, ".debug_info")) {
1995
ERRMSG("Can't get .debug_info section.\n");
2000
* Search by each CompileUnit.
2002
while (dwarf_nextcu(dwarfd, off, &next_off, &header_size,
2003
&abbrev_offset, &address_size, &offset_size) == 0) {
2005
if (dwarf_offdie(dwarfd, off, &cu_die) == NULL) {
2006
ERRMSG("Can't get CU die.\n");
2009
search_die_tree(dwarfd, &cu_die, &found);
2025
* Get the size of structure.
2028
get_structure_size(char *structname, int flag_typedef)
2031
dwarf_info.cmd = DWARF_INFO_GET_TYPEDEF_SIZE;
2033
dwarf_info.cmd = DWARF_INFO_GET_STRUCT_SIZE;
2035
dwarf_info.struct_name = structname;
2036
dwarf_info.struct_size = NOT_FOUND_STRUCTURE;
2038
if (!get_debug_info())
2039
return FAILED_DWARFINFO;
2041
return dwarf_info.struct_size;
2045
* Get the offset of member.
2048
get_member_offset(char *structname, char *membername, int cmd)
2050
dwarf_info.cmd = cmd;
2051
dwarf_info.struct_name = structname;
2052
dwarf_info.struct_size = NOT_FOUND_STRUCTURE;
2053
dwarf_info.member_name = membername;
2054
dwarf_info.member_offset = NOT_FOUND_STRUCTURE;
2056
if (!get_debug_info())
2057
return FAILED_DWARFINFO;
2059
return dwarf_info.member_offset;
2063
* Get the length of array.
2066
get_array_length(char *name01, char *name02, unsigned int cmd)
2069
case DWARF_INFO_GET_SYMBOL_ARRAY_LENGTH:
2070
dwarf_info.symbol_name = name01;
2072
case DWARF_INFO_CHECK_SYMBOL_ARRAY_TYPE:
2073
dwarf_info.symbol_name = name01;
2075
case DWARF_INFO_GET_MEMBER_ARRAY_LENGTH:
2076
dwarf_info.struct_name = name01;
2077
dwarf_info.member_name = name02;
2080
dwarf_info.cmd = cmd;
2081
dwarf_info.struct_size = NOT_FOUND_STRUCTURE;
2082
dwarf_info.member_offset = NOT_FOUND_STRUCTURE;
2083
dwarf_info.array_length = NOT_FOUND_STRUCTURE;
2085
if (!get_debug_info())
2086
return FAILED_DWARFINFO;
2088
return dwarf_info.array_length;
2092
get_enum_number(char *enum_name) {
2094
dwarf_info.cmd = DWARF_INFO_GET_ENUM_NUMBER;
2095
dwarf_info.enum_name = enum_name;
2096
dwarf_info.enum_number = NOT_FOUND_NUMBER;
2098
if (!get_debug_info())
2099
return FAILED_DWARFINFO;
2101
return dwarf_info.enum_number;
2105
* Get the source filename.
2108
get_source_filename(char *structname, char *src_name, int cmd)
2110
dwarf_info.cmd = cmd;
2111
dwarf_info.struct_name = structname;
2113
if (!get_debug_info())
2116
strncpy(src_name, dwarf_info.src_name, LEN_SRCFILE);
2122
get_symbol_info(void)
2127
SYMBOL_INIT(mem_map, "mem_map");
2128
SYMBOL_INIT(vmem_map, "vmem_map");
2129
SYMBOL_INIT(mem_section, "mem_section");
2130
SYMBOL_INIT(pkmap_count, "pkmap_count");
2131
SYMBOL_INIT_NEXT(pkmap_count_next, "pkmap_count");
2132
SYMBOL_INIT(system_utsname, "system_utsname");
2133
SYMBOL_INIT(init_uts_ns, "init_uts_ns");
2134
SYMBOL_INIT(_stext, "_stext");
2135
SYMBOL_INIT(swapper_pg_dir, "swapper_pg_dir");
2136
SYMBOL_INIT(init_level4_pgt, "init_level4_pgt");
2137
SYMBOL_INIT(vmlist, "vmlist");
2138
SYMBOL_INIT(phys_base, "phys_base");
2139
SYMBOL_INIT(node_online_map, "node_online_map");
2140
SYMBOL_INIT(node_states, "node_states");
2141
SYMBOL_INIT(node_memblk, "node_memblk");
2142
SYMBOL_INIT(node_data, "node_data");
2143
SYMBOL_INIT(pgdat_list, "pgdat_list");
2144
SYMBOL_INIT(contig_page_data, "contig_page_data");
2145
SYMBOL_INIT(log_buf, "log_buf");
2146
SYMBOL_INIT(log_buf_len, "log_buf_len");
2147
SYMBOL_INIT(log_end, "log_end");
2148
SYMBOL_INIT(max_pfn, "max_pfn");
2150
if (SYMBOL(node_data) != NOT_FOUND_SYMBOL)
2151
SYMBOL_ARRAY_TYPE_INIT(node_data, "node_data");
2152
if (SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
2153
SYMBOL_ARRAY_LENGTH_INIT(pgdat_list, "pgdat_list");
2154
if (SYMBOL(mem_section) != NOT_FOUND_SYMBOL)
2155
SYMBOL_ARRAY_LENGTH_INIT(mem_section, "mem_section");
2156
if (SYMBOL(node_memblk) != NOT_FOUND_SYMBOL)
2157
SYMBOL_ARRAY_LENGTH_INIT(node_memblk, "node_memblk");
2163
get_structure_info(void)
2166
* Get offsets of the page_discriptor's members.
2168
SIZE_INIT(page, "page");
2169
OFFSET_INIT(page.flags, "page", "flags");
2170
OFFSET_INIT(page._count, "page", "_count");
2172
OFFSET_INIT(page.mapping, "page", "mapping");
2175
* On linux-2.6.16 or later, page.mapping is defined
2176
* in anonymous union.
2178
if (OFFSET(page.mapping) == NOT_FOUND_STRUCTURE)
2179
OFFSET_IN_UNION_INIT(page.mapping, "page", "mapping");
2182
* Some vmlinux(s) don't have debugging information about
2183
* page.mapping. Then, makedumpfile assumes that there is
2184
* "mapping" next to "private(unsigned long)" in the first
2187
if (OFFSET(page.mapping) == NOT_FOUND_STRUCTURE) {
2188
OFFSET(page.mapping) = get_member_offset("page", NULL,
2189
DWARF_INFO_GET_MEMBER_OFFSET_1ST_UNION);
2190
if (OFFSET(page.mapping) == FAILED_DWARFINFO)
2192
if (OFFSET(page.mapping) != NOT_FOUND_STRUCTURE)
2193
OFFSET(page.mapping) += sizeof(unsigned long);
2196
OFFSET_INIT(page.lru, "page", "lru");
2199
* Get offsets of the mem_section's members.
2201
SIZE_INIT(mem_section, "mem_section");
2202
OFFSET_INIT(mem_section.section_mem_map, "mem_section",
2206
* Get offsets of the pglist_data's members.
2208
SIZE_INIT(pglist_data, "pglist_data");
2209
OFFSET_INIT(pglist_data.node_zones, "pglist_data", "node_zones");
2210
OFFSET_INIT(pglist_data.nr_zones, "pglist_data", "nr_zones");
2211
OFFSET_INIT(pglist_data.node_mem_map, "pglist_data", "node_mem_map");
2212
OFFSET_INIT(pglist_data.node_start_pfn, "pglist_data","node_start_pfn");
2213
OFFSET_INIT(pglist_data.node_spanned_pages, "pglist_data",
2214
"node_spanned_pages");
2215
OFFSET_INIT(pglist_data.pgdat_next, "pglist_data", "pgdat_next");
2218
* Get offsets of the zone's members.
2220
SIZE_INIT(zone, "zone");
2221
OFFSET_INIT(zone.free_pages, "zone", "free_pages");
2222
OFFSET_INIT(zone.free_area, "zone", "free_area");
2223
OFFSET_INIT(zone.vm_stat, "zone", "vm_stat");
2224
OFFSET_INIT(zone.spanned_pages, "zone", "spanned_pages");
2225
MEMBER_ARRAY_LENGTH_INIT(zone.free_area, "zone", "free_area");
2228
* Get offsets of the free_area's members.
2230
SIZE_INIT(free_area, "free_area");
2231
OFFSET_INIT(free_area.free_list, "free_area", "free_list");
2232
MEMBER_ARRAY_LENGTH_INIT(free_area.free_list, "free_area", "free_list");
2235
* Get offsets of the list_head's members.
2237
SIZE_INIT(list_head, "list_head");
2238
OFFSET_INIT(list_head.next, "list_head", "next");
2239
OFFSET_INIT(list_head.prev, "list_head", "prev");
2242
* Get offsets of the node_memblk_s's members.
2244
SIZE_INIT(node_memblk_s, "node_memblk_s");
2245
OFFSET_INIT(node_memblk_s.start_paddr, "node_memblk_s", "start_paddr");
2246
OFFSET_INIT(node_memblk_s.size, "node_memblk_s", "size");
2247
OFFSET_INIT(node_memblk_s.nid, "node_memblk_s", "nid");
2249
OFFSET_INIT(vm_struct.addr, "vm_struct", "addr");
2251
ENUM_NUMBER_INIT(NR_FREE_PAGES, "NR_FREE_PAGES");
2252
ENUM_NUMBER_INIT(N_ONLINE, "N_ONLINE");
2254
ENUM_NUMBER_INIT(PG_lru, "PG_lru");
2255
ENUM_NUMBER_INIT(PG_private, "PG_private");
2256
ENUM_NUMBER_INIT(PG_swapcache, "PG_swapcache");
2258
TYPEDEF_SIZE_INIT(nodemask_t, "nodemask_t");
2264
get_srcfile_info(void)
2266
TYPEDEF_SRCFILE_INIT(pud_t, "pud_t");
2272
get_value_for_old_linux(void)
2274
if (NUMBER(PG_lru) == NOT_FOUND_NUMBER)
2275
NUMBER(PG_lru) = PG_lru_ORIGINAL;
2276
if (NUMBER(PG_private) == NOT_FOUND_NUMBER)
2277
NUMBER(PG_private) = PG_private_ORIGINAL;
2278
if (NUMBER(PG_swapcache) == NOT_FOUND_NUMBER)
2279
NUMBER(PG_swapcache) = PG_swapcache_ORIGINAL;
2284
get_str_osrelease_from_vmlinux(void)
2286
struct utsname system_utsname;
2287
unsigned long long utsname;
2289
const off_t failed = (off_t)-1;
2292
* Get the kernel version.
2294
if (SYMBOL(system_utsname) != NOT_FOUND_SYMBOL) {
2295
utsname = SYMBOL(system_utsname);
2296
} else if (SYMBOL(init_uts_ns) != NOT_FOUND_SYMBOL) {
2297
utsname = SYMBOL(init_uts_ns) + sizeof(int);
2299
ERRMSG("Can't get the symbol of system_utsname.\n");
2302
offset = vaddr_to_offset_slow(dwarf_info.fd_debuginfo,
2303
dwarf_info.name_debuginfo, utsname);
2306
ERRMSG("Can't convert vaddr (%llx) of utsname to an offset.\n",
2310
if (lseek(dwarf_info.fd_debuginfo, offset, SEEK_SET) == failed) {
2311
ERRMSG("Can't seek %s. %s\n", dwarf_info.name_debuginfo,
2315
if (read(dwarf_info.fd_debuginfo, &system_utsname, sizeof system_utsname)
2316
!= sizeof system_utsname) {
2317
ERRMSG("Can't read %s. %s\n", dwarf_info.name_debuginfo,
2321
if (!strncpy(info->release, system_utsname.release, STRLEN_OSRELEASE)){
2322
ERRMSG("Can't do strncpy for osrelease.");
2329
is_sparsemem_extreme(void)
2331
if (ARRAY_LENGTH(mem_section)
2332
== (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME()))
2343
if ((SIZE(page) == NOT_FOUND_STRUCTURE)
2344
|| (OFFSET(page.flags) == NOT_FOUND_STRUCTURE)
2345
|| (OFFSET(page._count) == NOT_FOUND_STRUCTURE)
2346
|| (OFFSET(page.mapping) == NOT_FOUND_STRUCTURE)) {
2347
ret = NOT_FOUND_MEMTYPE;
2348
} else if ((((SYMBOL(node_data) != NOT_FOUND_SYMBOL)
2349
&& (ARRAY_LENGTH(node_data) != NOT_FOUND_STRUCTURE))
2350
|| ((SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
2351
&& (OFFSET(pglist_data.pgdat_next) != NOT_FOUND_STRUCTURE))
2352
|| ((SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
2353
&& (ARRAY_LENGTH(pgdat_list) != NOT_FOUND_STRUCTURE)))
2354
&& (SIZE(pglist_data) != NOT_FOUND_STRUCTURE)
2355
&& (OFFSET(pglist_data.node_mem_map) != NOT_FOUND_STRUCTURE)
2356
&& (OFFSET(pglist_data.node_start_pfn) != NOT_FOUND_STRUCTURE)
2357
&& (OFFSET(pglist_data.node_spanned_pages) !=NOT_FOUND_STRUCTURE)){
2359
} else if ((SYMBOL(mem_section) != NOT_FOUND_SYMBOL)
2360
&& (SIZE(mem_section) != NOT_FOUND_STRUCTURE)
2361
&& (OFFSET(mem_section.section_mem_map) != NOT_FOUND_STRUCTURE)
2362
&& (ARRAY_LENGTH(mem_section) != NOT_FOUND_STRUCTURE)) {
2363
if (is_sparsemem_extreme())
2367
} else if (SYMBOL(mem_map) != NOT_FOUND_SYMBOL) {
2370
ret = NOT_FOUND_MEMTYPE;
2377
generate_vmcoreinfo(void)
2379
if (!set_page_size(sysconf(_SC_PAGE_SIZE)))
2382
dwarf_info.fd_debuginfo = info->fd_vmlinux;
2383
dwarf_info.name_debuginfo = info->name_vmlinux;
2385
if (!get_symbol_info())
2388
if (!get_structure_info())
2391
if (!get_srcfile_info())
2394
if ((SYMBOL(system_utsname) == NOT_FOUND_SYMBOL)
2395
&& (SYMBOL(init_uts_ns) == NOT_FOUND_SYMBOL)) {
2396
ERRMSG("Can't get the symbol of system_utsname.\n");
2399
if (!get_str_osrelease_from_vmlinux())
2402
if (!(info->kernel_version = get_kernel_version(info->release)))
2405
if (get_mem_type() == NOT_FOUND_MEMTYPE) {
2406
ERRMSG("Can't find the memory type.\n");
2411
* write 1st kernel's OSRELEASE
2413
fprintf(info->file_vmcoreinfo, "%s%s\n", STR_OSRELEASE,
2417
* write 1st kernel's PAGESIZE
2419
fprintf(info->file_vmcoreinfo, "%s%ld\n", STR_PAGESIZE,
2423
* write the symbol of 1st kernel
2425
WRITE_SYMBOL("mem_map", mem_map);
2426
WRITE_SYMBOL("vmem_map", vmem_map);
2427
WRITE_SYMBOL("mem_section", mem_section);
2428
WRITE_SYMBOL("pkmap_count", pkmap_count);
2429
WRITE_SYMBOL("pkmap_count_next", pkmap_count_next);
2430
WRITE_SYMBOL("system_utsname", system_utsname);
2431
WRITE_SYMBOL("init_uts_ns", init_uts_ns);
2432
WRITE_SYMBOL("_stext", _stext);
2433
WRITE_SYMBOL("swapper_pg_dir", swapper_pg_dir);
2434
WRITE_SYMBOL("init_level4_pgt", init_level4_pgt);
2435
WRITE_SYMBOL("vmlist", vmlist);
2436
WRITE_SYMBOL("phys_base", phys_base);
2437
WRITE_SYMBOL("node_online_map", node_online_map);
2438
WRITE_SYMBOL("node_states", node_states);
2439
WRITE_SYMBOL("node_data", node_data);
2440
WRITE_SYMBOL("pgdat_list", pgdat_list);
2441
WRITE_SYMBOL("contig_page_data", contig_page_data);
2442
WRITE_SYMBOL("log_buf", log_buf);
2443
WRITE_SYMBOL("log_buf_len", log_buf_len);
2444
WRITE_SYMBOL("log_end", log_end);
2445
WRITE_SYMBOL("max_pfn", max_pfn);
2448
* write the structure size of 1st kernel
2450
WRITE_STRUCTURE_SIZE("page", page);
2451
WRITE_STRUCTURE_SIZE("mem_section", mem_section);
2452
WRITE_STRUCTURE_SIZE("pglist_data", pglist_data);
2453
WRITE_STRUCTURE_SIZE("zone", zone);
2454
WRITE_STRUCTURE_SIZE("free_area", free_area);
2455
WRITE_STRUCTURE_SIZE("list_head", list_head);
2456
WRITE_STRUCTURE_SIZE("node_memblk_s", node_memblk_s);
2457
WRITE_STRUCTURE_SIZE("nodemask_t", nodemask_t);
2460
* write the member offset of 1st kernel
2462
WRITE_MEMBER_OFFSET("page.flags", page.flags);
2463
WRITE_MEMBER_OFFSET("page._count", page._count);
2464
WRITE_MEMBER_OFFSET("page.mapping", page.mapping);
2465
WRITE_MEMBER_OFFSET("page.lru", page.lru);
2466
WRITE_MEMBER_OFFSET("mem_section.section_mem_map",
2467
mem_section.section_mem_map);
2468
WRITE_MEMBER_OFFSET("pglist_data.node_zones", pglist_data.node_zones);
2469
WRITE_MEMBER_OFFSET("pglist_data.nr_zones", pglist_data.nr_zones);
2470
WRITE_MEMBER_OFFSET("pglist_data.node_mem_map",
2471
pglist_data.node_mem_map);
2472
WRITE_MEMBER_OFFSET("pglist_data.node_start_pfn",
2473
pglist_data.node_start_pfn);
2474
WRITE_MEMBER_OFFSET("pglist_data.node_spanned_pages",
2475
pglist_data.node_spanned_pages);
2476
WRITE_MEMBER_OFFSET("pglist_data.pgdat_next", pglist_data.pgdat_next);
2477
WRITE_MEMBER_OFFSET("zone.free_pages", zone.free_pages);
2478
WRITE_MEMBER_OFFSET("zone.free_area", zone.free_area);
2479
WRITE_MEMBER_OFFSET("zone.vm_stat", zone.vm_stat);
2480
WRITE_MEMBER_OFFSET("zone.spanned_pages", zone.spanned_pages);
2481
WRITE_MEMBER_OFFSET("free_area.free_list", free_area.free_list);
2482
WRITE_MEMBER_OFFSET("list_head.next", list_head.next);
2483
WRITE_MEMBER_OFFSET("list_head.prev", list_head.prev);
2484
WRITE_MEMBER_OFFSET("node_memblk_s.start_paddr", node_memblk_s.start_paddr);
2485
WRITE_MEMBER_OFFSET("node_memblk_s.size", node_memblk_s.size);
2486
WRITE_MEMBER_OFFSET("node_memblk_s.nid", node_memblk_s.nid);
2487
WRITE_MEMBER_OFFSET("vm_struct.addr", vm_struct.addr);
2489
if (SYMBOL(node_data) != NOT_FOUND_SYMBOL)
2490
WRITE_ARRAY_LENGTH("node_data", node_data);
2491
if (SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
2492
WRITE_ARRAY_LENGTH("pgdat_list", pgdat_list);
2493
if (SYMBOL(mem_section) != NOT_FOUND_SYMBOL)
2494
WRITE_ARRAY_LENGTH("mem_section", mem_section);
2495
if (SYMBOL(node_memblk) != NOT_FOUND_SYMBOL)
2496
WRITE_ARRAY_LENGTH("node_memblk", node_memblk);
2498
WRITE_ARRAY_LENGTH("zone.free_area", zone.free_area);
2499
WRITE_ARRAY_LENGTH("free_area.free_list", free_area.free_list);
2501
WRITE_NUMBER("NR_FREE_PAGES", NR_FREE_PAGES);
2502
WRITE_NUMBER("N_ONLINE", N_ONLINE);
2504
WRITE_NUMBER("PG_lru", PG_lru);
2505
WRITE_NUMBER("PG_private", PG_private);
2506
WRITE_NUMBER("PG_swapcache", PG_swapcache);
2509
* write the source file of 1st kernel
2511
WRITE_SRCFILE("pud_t", pud_t);
2517
read_vmcoreinfo_basic_info(void)
2520
long page_size = FALSE;
2521
char buf[BUFSIZE_FGETS], *endp;
2522
unsigned int get_release = FALSE, i;
2524
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
2525
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
2526
info->name_vmcoreinfo, strerror(errno));
2530
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
2534
if (buf[i - 1] == '\n')
2536
if (strncmp(buf, STR_OSRELEASE, strlen(STR_OSRELEASE)) == 0) {
2538
/* if the release have been stored, skip this time. */
2539
if (strlen(info->release))
2541
strcpy(info->release, buf + strlen(STR_OSRELEASE));
2543
if (strncmp(buf, STR_PAGESIZE, strlen(STR_PAGESIZE)) == 0) {
2544
page_size = strtol(buf+strlen(STR_PAGESIZE),&endp,10);
2545
if ((!page_size || page_size == LONG_MAX)
2546
|| strlen(endp) != 0) {
2547
ERRMSG("Invalid data in %s: %s",
2548
info->name_vmcoreinfo, buf);
2551
if (!set_page_size(page_size)) {
2552
ERRMSG("Invalid data in %s: %s",
2553
info->name_vmcoreinfo, buf);
2557
if (strncmp(buf, STR_CRASHTIME, strlen(STR_CRASHTIME)) == 0) {
2558
tv_sec = strtol(buf+strlen(STR_CRASHTIME),&endp,10);
2559
if ((!tv_sec || tv_sec == LONG_MAX)
2560
|| strlen(endp) != 0) {
2561
ERRMSG("Invalid data in %s: %s",
2562
info->name_vmcoreinfo, buf);
2565
info->timestamp.tv_sec = tv_sec;
2567
if (strncmp(buf, STR_CONFIG_X86_PAE,
2568
strlen(STR_CONFIG_X86_PAE)) == 0)
2569
vt.mem_flags |= MEMORY_X86_PAE;
2571
if (strncmp(buf, STR_CONFIG_PGTABLE_3,
2572
strlen(STR_CONFIG_PGTABLE_3)) == 0)
2573
vt.mem_flags |= MEMORY_PAGETABLE_3L;
2575
if (strncmp(buf, STR_CONFIG_PGTABLE_4,
2576
strlen(STR_CONFIG_PGTABLE_4)) == 0)
2577
vt.mem_flags |= MEMORY_PAGETABLE_4L;
2579
if (!get_release || !info->page_size) {
2580
ERRMSG("Invalid format in %s", info->name_vmcoreinfo);
2587
read_vmcoreinfo_symbol(char *str_symbol)
2589
unsigned long symbol = NOT_FOUND_SYMBOL;
2590
char buf[BUFSIZE_FGETS], *endp;
2593
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
2594
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
2595
info->name_vmcoreinfo, strerror(errno));
2596
return INVALID_SYMBOL_DATA;
2599
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
2603
if (buf[i - 1] == '\n')
2605
if (strncmp(buf, str_symbol, strlen(str_symbol)) == 0) {
2606
symbol = strtoul(buf + strlen(str_symbol), &endp, 16);
2607
if ((!symbol || symbol == ULONG_MAX)
2608
|| strlen(endp) != 0) {
2609
ERRMSG("Invalid data in %s: %s",
2610
info->name_vmcoreinfo, buf);
2611
return INVALID_SYMBOL_DATA;
2620
read_vmcoreinfo_long(char *str_structure)
2622
long data = NOT_FOUND_LONG_VALUE;
2623
char buf[BUFSIZE_FGETS], *endp;
2626
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
2627
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
2628
info->name_vmcoreinfo, strerror(errno));
2629
return INVALID_STRUCTURE_DATA;
2632
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
2636
if (buf[i - 1] == '\n')
2638
if (strncmp(buf, str_structure, strlen(str_structure)) == 0) {
2639
data = strtol(buf + strlen(str_structure), &endp, 10);
2640
if ((data == LONG_MAX) || strlen(endp) != 0) {
2641
ERRMSG("Invalid data in %s: %s",
2642
info->name_vmcoreinfo, buf);
2643
return INVALID_STRUCTURE_DATA;
2652
read_vmcoreinfo_string(char *str_in, char *str_out)
2654
char buf[BUFSIZE_FGETS];
2657
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
2658
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
2659
info->name_vmcoreinfo, strerror(errno));
2663
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
2667
if (buf[i - 1] == '\n')
2669
if (strncmp(buf, str_in, strlen(str_in)) == 0) {
2670
strncpy(str_out, buf + strlen(str_in), LEN_SRCFILE - strlen(str_in));
2678
read_vmcoreinfo(void)
2680
if (!read_vmcoreinfo_basic_info())
2683
READ_SYMBOL("mem_map", mem_map);
2684
READ_SYMBOL("vmem_map", vmem_map);
2685
READ_SYMBOL("mem_section", mem_section);
2686
READ_SYMBOL("pkmap_count", pkmap_count);
2687
READ_SYMBOL("pkmap_count_next", pkmap_count_next);
2688
READ_SYMBOL("system_utsname", system_utsname);
2689
READ_SYMBOL("init_uts_ns", init_uts_ns);
2690
READ_SYMBOL("_stext", _stext);
2691
READ_SYMBOL("swapper_pg_dir", swapper_pg_dir);
2692
READ_SYMBOL("init_level4_pgt", init_level4_pgt);
2693
READ_SYMBOL("vmlist", vmlist);
2694
READ_SYMBOL("phys_base", phys_base);
2695
READ_SYMBOL("node_online_map", node_online_map);
2696
READ_SYMBOL("node_states", node_states);
2697
READ_SYMBOL("node_data", node_data);
2698
READ_SYMBOL("pgdat_list", pgdat_list);
2699
READ_SYMBOL("contig_page_data", contig_page_data);
2700
READ_SYMBOL("log_buf", log_buf);
2701
READ_SYMBOL("log_buf_len", log_buf_len);
2702
READ_SYMBOL("log_end", log_end);
2703
READ_SYMBOL("max_pfn", max_pfn);
2705
READ_STRUCTURE_SIZE("page", page);
2706
READ_STRUCTURE_SIZE("mem_section", mem_section);
2707
READ_STRUCTURE_SIZE("pglist_data", pglist_data);
2708
READ_STRUCTURE_SIZE("zone", zone);
2709
READ_STRUCTURE_SIZE("free_area", free_area);
2710
READ_STRUCTURE_SIZE("list_head", list_head);
2711
READ_STRUCTURE_SIZE("node_memblk_s", node_memblk_s);
2712
READ_STRUCTURE_SIZE("nodemask_t", nodemask_t);
2714
READ_MEMBER_OFFSET("page.flags", page.flags);
2715
READ_MEMBER_OFFSET("page._count", page._count);
2716
READ_MEMBER_OFFSET("page.mapping", page.mapping);
2717
READ_MEMBER_OFFSET("page.lru", page.lru);
2718
READ_MEMBER_OFFSET("mem_section.section_mem_map",
2719
mem_section.section_mem_map);
2720
READ_MEMBER_OFFSET("pglist_data.node_zones", pglist_data.node_zones);
2721
READ_MEMBER_OFFSET("pglist_data.nr_zones", pglist_data.nr_zones);
2722
READ_MEMBER_OFFSET("pglist_data.node_mem_map",pglist_data.node_mem_map);
2723
READ_MEMBER_OFFSET("pglist_data.node_start_pfn",
2724
pglist_data.node_start_pfn);
2725
READ_MEMBER_OFFSET("pglist_data.node_spanned_pages",
2726
pglist_data.node_spanned_pages);
2727
READ_MEMBER_OFFSET("pglist_data.pgdat_next", pglist_data.pgdat_next);
2728
READ_MEMBER_OFFSET("zone.free_pages", zone.free_pages);
2729
READ_MEMBER_OFFSET("zone.free_area", zone.free_area);
2730
READ_MEMBER_OFFSET("zone.vm_stat", zone.vm_stat);
2731
READ_MEMBER_OFFSET("zone.spanned_pages", zone.spanned_pages);
2732
READ_MEMBER_OFFSET("free_area.free_list", free_area.free_list);
2733
READ_MEMBER_OFFSET("list_head.next", list_head.next);
2734
READ_MEMBER_OFFSET("list_head.prev", list_head.prev);
2735
READ_MEMBER_OFFSET("node_memblk_s.start_paddr", node_memblk_s.start_paddr);
2736
READ_MEMBER_OFFSET("node_memblk_s.size", node_memblk_s.size);
2737
READ_MEMBER_OFFSET("node_memblk_s.nid", node_memblk_s.nid);
2738
READ_MEMBER_OFFSET("vm_struct.addr", vm_struct.addr);
2740
READ_ARRAY_LENGTH("node_data", node_data);
2741
READ_ARRAY_LENGTH("pgdat_list", pgdat_list);
2742
READ_ARRAY_LENGTH("mem_section", mem_section);
2743
READ_ARRAY_LENGTH("node_memblk", node_memblk);
2744
READ_ARRAY_LENGTH("zone.free_area", zone.free_area);
2745
READ_ARRAY_LENGTH("free_area.free_list", free_area.free_list);
2747
READ_NUMBER("NR_FREE_PAGES", NR_FREE_PAGES);
2748
READ_NUMBER("N_ONLINE", N_ONLINE);
2750
READ_NUMBER("PG_lru", PG_lru);
2751
READ_NUMBER("PG_private", PG_private);
2752
READ_NUMBER("PG_swapcache", PG_swapcache);
2754
READ_SRCFILE("pud_t", pud_t);
2759
#define MAX_SIZE_NHDR MAX(sizeof(Elf64_Nhdr), sizeof(Elf32_Nhdr))
2762
offset_next_note(void *note)
2769
* Both name and desc in ELF Note elements are padded to
2772
if (info->flag_elf64_memory) {
2773
note64 = (Elf64_Nhdr *)note;
2774
offset = sizeof(Elf64_Nhdr)
2775
+ roundup(note64->n_namesz, 4)
2776
+ roundup(note64->n_descsz, 4);
2778
note32 = (Elf32_Nhdr *)note;
2779
offset = sizeof(Elf32_Nhdr)
2780
+ roundup(note32->n_namesz, 4)
2781
+ roundup(note32->n_descsz, 4);
2787
note_type(void *note)
2793
if (info->flag_elf64_memory) {
2794
note64 = (Elf64_Nhdr *)note;
2795
type = note64->n_type;
2797
note32 = (Elf32_Nhdr *)note;
2798
type = note32->n_type;
2804
note_descsz(void *note)
2810
if (info->flag_elf64_memory) {
2811
note64 = (Elf64_Nhdr *)note;
2812
size = note64->n_descsz;
2814
note32 = (Elf32_Nhdr *)note;
2815
size = note32->n_descsz;
2821
offset_note_desc(void *note)
2827
if (info->flag_elf64_memory) {
2828
note64 = (Elf64_Nhdr *)note;
2829
offset = sizeof(Elf64_Nhdr) + roundup(note64->n_namesz, 4);
2831
note32 = (Elf32_Nhdr *)note;
2832
offset = sizeof(Elf32_Nhdr) + roundup(note32->n_namesz, 4);
2838
get_pt_note_info(off_t off_note, unsigned long sz_note)
2840
int n_type, size_desc;
2841
unsigned long p2m_mfn;
2842
off_t offset, offset_desc, off_p2m = 0;
2843
char buf[VMCOREINFO_XEN_NOTE_NAME_BYTES];
2844
char note[MAX_SIZE_NHDR];
2845
const off_t failed = (off_t)-1;
2848
while (offset < off_note + sz_note) {
2849
if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
2850
ERRMSG("Can't seek the dump memory(%s). %s\n",
2851
info->name_memory, strerror(errno));
2854
if (read(info->fd_memory, note, sizeof(note)) != sizeof(note)) {
2855
ERRMSG("Can't read the dump memory(%s). %s\n",
2856
info->name_memory, strerror(errno));
2859
if (read(info->fd_memory, &buf, sizeof(buf)) != sizeof(buf)) {
2860
ERRMSG("Can't read the dump memory(%s). %s\n",
2861
info->name_memory, strerror(errno));
2864
n_type = note_type(note);
2865
offset_desc = offset + offset_note_desc(note);
2866
size_desc = note_descsz(note);
2869
* Check whether /proc/vmcore contains vmcoreinfo,
2870
* and get both the offset and the size.
2872
* NOTE: The owner name of xen should be checked at first,
2873
* because its name is "VMCOREINFO_XEN" and the one
2874
* of linux is "VMCOREINFO".
2876
if (!strncmp(VMCOREINFO_XEN_NOTE_NAME, buf,
2877
VMCOREINFO_XEN_NOTE_NAME_BYTES)) {
2878
info->offset_vmcoreinfo_xen = offset_desc;
2879
info->size_vmcoreinfo_xen = size_desc;
2880
} else if (!strncmp(VMCOREINFO_NOTE_NAME, buf,
2881
VMCOREINFO_NOTE_NAME_BYTES)) {
2882
info->offset_vmcoreinfo = offset_desc;
2883
info->size_vmcoreinfo = size_desc;
2886
* Check whether /proc/vmcore contains xen's note.
2888
} else if (n_type == XEN_ELFNOTE_CRASH_INFO) {
2889
vt.mem_flags |= MEMORY_XEN;
2890
info->offset_xen_crash_info = offset_desc;
2891
info->size_xen_crash_info = size_desc;
2893
off_p2m = offset + offset_next_note(note)
2895
if (lseek(info->fd_memory, off_p2m, SEEK_SET)
2897
ERRMSG("Can't seek the dump memory(%s). %s\n",
2898
info->name_memory, strerror(errno));
2901
if (read(info->fd_memory, &p2m_mfn, sizeof(p2m_mfn))
2902
!= sizeof(p2m_mfn)) {
2903
ERRMSG("Can't read the dump memory(%s). %s\n",
2904
info->name_memory, strerror(errno));
2907
info->p2m_mfn = p2m_mfn;
2909
offset += offset_next_note(note);
2911
if (vt.mem_flags & MEMORY_XEN)
2912
DEBUG_MSG("Xen kdump\n");
2914
DEBUG_MSG("Linux kdump\n");
2920
* Extract vmcoreinfo from /proc/vmcore and output it to /tmp/vmcoreinfo.tmp.
2923
copy_vmcoreinfo(off_t offset, unsigned long size)
2926
char buf[VMCOREINFO_BYTES];
2927
const off_t failed = (off_t)-1;
2929
if (!offset || !size)
2932
if ((fd = mkstemp(info->name_vmcoreinfo)) < 0) {
2933
ERRMSG("Can't open the vmcoreinfo file(%s). %s\n",
2934
info->name_vmcoreinfo, strerror(errno));
2937
if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
2938
ERRMSG("Can't seek the dump memory(%s). %s\n",
2939
info->name_memory, strerror(errno));
2942
if (read(info->fd_memory, &buf, size) != size) {
2943
ERRMSG("Can't read the dump memory(%s). %s\n",
2944
info->name_memory, strerror(errno));
2947
if (write(fd, &buf, size) != size) {
2948
ERRMSG("Can't write the vmcoreinfo file(%s). %s\n",
2949
info->name_vmcoreinfo, strerror(errno));
2952
if (close(fd) < 0) {
2953
ERRMSG("Can't close the vmcoreinfo file(%s). %s\n",
2954
info->name_vmcoreinfo, strerror(errno));
2961
read_vmcoreinfo_from_vmcore(off_t offset, unsigned long size, int flag_xen_hv)
2966
* Copy vmcoreinfo to /tmp/vmcoreinfoXXXXXX.
2968
if (!(info->name_vmcoreinfo = strdup(FILENAME_VMCOREINFO))) {
2969
MSG("Can't duplicate strings(%s).\n", FILENAME_VMCOREINFO);
2972
if (!copy_vmcoreinfo(offset, size))
2976
* Read vmcoreinfo from /tmp/vmcoreinfoXXXXXX.
2978
if (!open_vmcoreinfo("r"))
2981
unlink(info->name_vmcoreinfo);
2984
if (!read_vmcoreinfo_xen())
2987
if (!read_vmcoreinfo())
2994
free(info->name_vmcoreinfo);
2995
info->name_vmcoreinfo = NULL;
3001
* Get the number of online nodes.
3004
get_nodes_online(void)
3006
int len, i, j, online;
3007
unsigned long node_online_map = 0, bitbuf, *maskptr;
3009
if ((SYMBOL(node_online_map) == NOT_FOUND_SYMBOL)
3010
&& (SYMBOL(node_states) == NOT_FOUND_SYMBOL))
3013
if (SIZE(nodemask_t) == NOT_FOUND_STRUCTURE) {
3014
ERRMSG("Can't get the size of nodemask_t.\n");
3018
len = SIZE(nodemask_t);
3019
vt.node_online_map_len = len/sizeof(unsigned long);
3020
if (!(vt.node_online_map = (unsigned long *)malloc(len))) {
3021
ERRMSG("Can't allocate memory for the node online map. %s\n",
3025
if (SYMBOL(node_online_map) != NOT_FOUND_SYMBOL) {
3026
node_online_map = SYMBOL(node_online_map);
3027
} else if (SYMBOL(node_states) != NOT_FOUND_SYMBOL) {
3029
* For linux-2.6.23-rc4-mm1
3031
node_online_map = SYMBOL(node_states)
3032
+ (SIZE(nodemask_t) * NUMBER(N_ONLINE));
3034
if (!readmem(VADDR, node_online_map, vt.node_online_map, len)){
3035
ERRMSG("Can't get the node online map.\n");
3039
maskptr = (unsigned long *)vt.node_online_map;
3040
for (i = 0; i < vt.node_online_map_len; i++, maskptr++) {
3042
for (j = 0; j < sizeof(bitbuf) * 8; j++) {
3043
online += bitbuf & 1;
3044
bitbuf = bitbuf >> 1;
3053
if (!(vt.numnodes = get_nodes_online())) {
3057
DEBUG_MSG("num of NODEs : %d\n", vt.numnodes);
3064
next_online_node(int first)
3067
unsigned long mask, *maskptr;
3069
/* It cannot occur */
3070
if ((first/(sizeof(unsigned long) * 8)) >= vt.node_online_map_len) {
3071
ERRMSG("next_online_node: %d is too large!\n", first);
3075
maskptr = (unsigned long *)vt.node_online_map;
3076
for (i = node = 0; i < vt.node_online_map_len; i++, maskptr++) {
3078
for (j = 0; j < (sizeof(unsigned long) * 8); j++, node++) {
3090
next_online_pgdat(int node)
3093
unsigned long pgdat;
3096
* Get the pglist_data structure from symbol "node_data".
3097
* The array number of symbol "node_data" cannot be gotten
3098
* from vmlinux. Instead, check it is DW_TAG_array_type.
3100
if ((SYMBOL(node_data) == NOT_FOUND_SYMBOL)
3101
|| (ARRAY_LENGTH(node_data) == NOT_FOUND_STRUCTURE))
3104
if (!readmem(VADDR, SYMBOL(node_data) + (node * sizeof(void *)),
3105
&pgdat, sizeof pgdat))
3108
if (!is_kvaddr(pgdat))
3115
* Get the pglist_data structure from symbol "pgdat_list".
3117
if (SYMBOL(pgdat_list) == NOT_FOUND_SYMBOL)
3121
&& (ARRAY_LENGTH(pgdat_list) == NOT_FOUND_STRUCTURE))
3124
else if ((ARRAY_LENGTH(pgdat_list) != NOT_FOUND_STRUCTURE)
3125
&& (ARRAY_LENGTH(pgdat_list) < node))
3128
if (!readmem(VADDR, SYMBOL(pgdat_list) + (node * sizeof(void *)),
3129
&pgdat, sizeof pgdat))
3132
if (!is_kvaddr(pgdat))
3139
* linux-2.6.16 or former
3141
if ((SYMBOL(pgdat_list) == NOT_FOUND_SYMBOL)
3142
|| (OFFSET(pglist_data.pgdat_next) == NOT_FOUND_STRUCTURE))
3145
if (!readmem(VADDR, SYMBOL(pgdat_list), &pgdat, sizeof pgdat))
3148
if (!is_kvaddr(pgdat))
3154
for (i = 1; i <= node; i++) {
3155
if (!readmem(VADDR, pgdat+OFFSET(pglist_data.pgdat_next),
3156
&pgdat, sizeof pgdat))
3159
if (!is_kvaddr(pgdat))
3166
* Get the pglist_data structure from symbol "contig_page_data".
3168
if (SYMBOL(contig_page_data) == NOT_FOUND_SYMBOL)
3174
return SYMBOL(contig_page_data);
3178
dump_mem_map(unsigned long long pfn_start,
3179
unsigned long long pfn_end, unsigned long mem_map, int num_mm)
3181
struct mem_map_data *mmd;
3183
mmd = &info->mem_map_data[num_mm];
3184
mmd->pfn_start = pfn_start;
3185
mmd->pfn_end = pfn_end;
3186
mmd->mem_map = mem_map;
3188
DEBUG_MSG("mem_map (%d)\n", num_mm);
3189
DEBUG_MSG(" mem_map : %lx\n", mem_map);
3190
DEBUG_MSG(" pfn_start : %llx\n", pfn_start);
3191
DEBUG_MSG(" pfn_end : %llx\n", pfn_end);
3197
get_mm_flatmem(void)
3199
unsigned long mem_map;
3202
* Get the address of the symbol "mem_map".
3204
if (!readmem(VADDR, SYMBOL(mem_map), &mem_map, sizeof mem_map)
3206
ERRMSG("Can't get the address of mem_map.\n");
3209
info->num_mem_map = 1;
3210
if ((info->mem_map_data = (struct mem_map_data *)
3211
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
3212
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
3216
if (vt.mem_flags & MEMORY_XEN)
3217
dump_mem_map(0, info->dom0_mapnr, mem_map, 0);
3219
dump_mem_map(0, info->max_mapnr, mem_map, 0);
3225
get_node_memblk(int num_memblk,
3226
unsigned long *start_paddr, unsigned long *size, int *nid)
3228
unsigned long node_memblk;
3230
if (ARRAY_LENGTH(node_memblk) <= num_memblk) {
3231
ERRMSG("Invalid num_memblk.\n");
3234
node_memblk = SYMBOL(node_memblk) + SIZE(node_memblk_s) * num_memblk;
3235
if (!readmem(VADDR, node_memblk+OFFSET(node_memblk_s.start_paddr),
3236
start_paddr, sizeof(unsigned long))) {
3237
ERRMSG("Can't get node_memblk_s.start_paddr.\n");
3240
if (!readmem(VADDR, node_memblk + OFFSET(node_memblk_s.size),
3241
size, sizeof(unsigned long))) {
3242
ERRMSG("Can't get node_memblk_s.size.\n");
3245
if (!readmem(VADDR, node_memblk + OFFSET(node_memblk_s.nid),
3246
nid, sizeof(int))) {
3247
ERRMSG("Can't get node_memblk_s.nid.\n");
3254
get_num_mm_discontigmem(void)
3257
unsigned long start_paddr, size;
3259
if ((SYMBOL(node_memblk) == NOT_FOUND_SYMBOL)
3260
|| (ARRAY_LENGTH(node_memblk) == NOT_FOUND_STRUCTURE)
3261
|| (SIZE(node_memblk_s) == NOT_FOUND_STRUCTURE)
3262
|| (OFFSET(node_memblk_s.start_paddr) == NOT_FOUND_STRUCTURE)
3263
|| (OFFSET(node_memblk_s.size) == NOT_FOUND_STRUCTURE)
3264
|| (OFFSET(node_memblk_s.nid) == NOT_FOUND_STRUCTURE)) {
3267
for (i = 0; i < ARRAY_LENGTH(node_memblk); i++) {
3268
if (!get_node_memblk(i, &start_paddr, &size, &nid)) {
3269
ERRMSG("Can't get the node_memblk (%d)\n", i);
3272
if (!start_paddr && !size &&!nid)
3275
DEBUG_MSG("nid : %d\n", nid);
3276
DEBUG_MSG(" start_paddr: %lx\n", start_paddr);
3277
DEBUG_MSG(" size : %lx\n", size);
3281
* On non-NUMA systems, node_memblk_s is not set.
3291
separate_mem_map(struct mem_map_data *mmd, int *id_mm, int nid_pgdat,
3292
unsigned long mem_map_pgdat, unsigned long pfn_start_pgdat)
3295
unsigned long start_paddr, size, pfn_start, pfn_end, mem_map;
3297
for (i = 0; i < ARRAY_LENGTH(node_memblk); i++) {
3298
if (!get_node_memblk(i, &start_paddr, &size, &nid)) {
3299
ERRMSG("Can't get the node_memblk (%d)\n", i);
3302
if (!start_paddr && !size && !nid)
3306
* Check pglist_data.node_id and node_memblk_s.nid match.
3308
if (nid_pgdat != nid)
3311
pfn_start = paddr_to_pfn(start_paddr);
3312
pfn_end = paddr_to_pfn(start_paddr + size);
3314
if (pfn_start < pfn_start_pgdat) {
3315
ERRMSG("node_memblk_s.start_paddr of node (%d) is invalid.\n", nid);
3318
if (info->max_mapnr < pfn_end) {
3319
DEBUG_MSG("pfn_end of node (%d) is over max_mapnr.\n",
3321
DEBUG_MSG(" pfn_start: %lx\n", pfn_start);
3322
DEBUG_MSG(" pfn_end : %lx\n", pfn_end);
3323
DEBUG_MSG(" max_mapnr: %llx\n", info->max_mapnr);
3325
pfn_end = info->max_mapnr;
3328
mem_map = mem_map_pgdat+SIZE(page)*(pfn_start-pfn_start_pgdat);
3330
mmd->pfn_start = pfn_start;
3331
mmd->pfn_end = pfn_end;
3332
mmd->mem_map = mem_map;
3341
get_mm_discontigmem(void)
3343
int i, j, id_mm, node, num_mem_map, separate_mm = FALSE;
3344
unsigned long pgdat, mem_map, pfn_start, pfn_end, node_spanned_pages;
3345
unsigned long vmem_map;
3346
struct mem_map_data temp_mmd;
3348
num_mem_map = get_num_mm_discontigmem();
3349
if (num_mem_map < vt.numnodes) {
3350
ERRMSG("Can't get the number of mem_map.\n");
3353
struct mem_map_data mmd[num_mem_map];
3354
if (vt.numnodes < num_mem_map) {
3360
* This note is only for ia64 discontigmem kernel.
3361
* It is better to take mem_map information from a symbol vmem_map
3362
* instead of pglist_data.node_mem_map, because some node_mem_map
3363
* sometimes does not have mem_map information corresponding to its
3366
if (SYMBOL(vmem_map) != NOT_FOUND_SYMBOL) {
3367
if (!readmem(VADDR, SYMBOL(vmem_map), &vmem_map, sizeof vmem_map)) {
3368
ERRMSG("Can't get vmem_map.\n");
3374
* Get the first node_id.
3376
if ((node = next_online_node(0)) < 0) {
3377
ERRMSG("Can't get next online node.\n");
3380
if (!(pgdat = next_online_pgdat(node))) {
3381
ERRMSG("Can't get pgdat list.\n");
3385
for (i = 0; i < vt.numnodes; i++) {
3386
if (!readmem(VADDR, pgdat + OFFSET(pglist_data.node_start_pfn),
3387
&pfn_start, sizeof pfn_start)) {
3388
ERRMSG("Can't get node_start_pfn.\n");
3391
if (!readmem(VADDR,pgdat+OFFSET(pglist_data.node_spanned_pages),
3392
&node_spanned_pages, sizeof node_spanned_pages)) {
3393
ERRMSG("Can't get node_spanned_pages.\n");
3396
pfn_end = pfn_start + node_spanned_pages;
3398
if (SYMBOL(vmem_map) == NOT_FOUND_SYMBOL) {
3399
if (!readmem(VADDR, pgdat + OFFSET(pglist_data.node_mem_map),
3400
&mem_map, sizeof mem_map)) {
3401
ERRMSG("Can't get mem_map.\n");
3405
mem_map = vmem_map + (SIZE(page) * pfn_start);
3409
* For some ia64 NUMA systems.
3410
* On some systems, a node has the separated memory.
3411
* And pglist_data(s) have the duplicated memory range
3414
* Nid: Physical address
3415
* 0 : 0x1000000000 - 0x2000000000
3416
* 1 : 0x2000000000 - 0x3000000000
3417
* 2 : 0x0000000000 - 0x6020000000 <- Overlapping
3418
* 3 : 0x3000000000 - 0x4000000000
3419
* 4 : 0x4000000000 - 0x5000000000
3420
* 5 : 0x5000000000 - 0x6000000000
3422
* Then, mem_map(s) should be separated by
3423
* node_memblk_s info.
3425
if (!separate_mem_map(&mmd[id_mm], &id_mm, node,
3426
mem_map, pfn_start)) {
3427
ERRMSG("Can't separate mem_map.\n");
3431
if (info->max_mapnr < pfn_end) {
3432
DEBUG_MSG("pfn_end of node (%d) is over max_mapnr.\n",
3434
DEBUG_MSG(" pfn_start: %lx\n", pfn_start);
3435
DEBUG_MSG(" pfn_end : %lx\n", pfn_end);
3436
DEBUG_MSG(" max_mapnr: %llx\n", info->max_mapnr);
3438
pfn_end = info->max_mapnr;
3442
* The number of mem_map is the same as the number
3445
mmd[id_mm].pfn_start = pfn_start;
3446
mmd[id_mm].pfn_end = pfn_end;
3447
mmd[id_mm].mem_map = mem_map;
3452
* Get pglist_data of the next node.
3454
if (i < (vt.numnodes - 1)) {
3455
if ((node = next_online_node(node + 1)) < 0) {
3456
ERRMSG("Can't get next online node.\n");
3458
} else if (!(pgdat = next_online_pgdat(node))) {
3459
ERRMSG("Can't determine pgdat list (node %d).\n",
3467
* Sort mem_map by pfn_start.
3469
for (i = 0; i < (num_mem_map - 1); i++) {
3470
for (j = i + 1; j < num_mem_map; j++) {
3471
if (mmd[j].pfn_start < mmd[i].pfn_start) {
3480
* Calculate the number of mem_map.
3482
info->num_mem_map = num_mem_map;
3483
if (mmd[0].pfn_start != 0)
3484
info->num_mem_map++;
3486
for (i = 0; i < num_mem_map - 1; i++) {
3487
if (mmd[i].pfn_end > mmd[i + 1].pfn_start) {
3488
ERRMSG("The mem_map is overlapped with the next one.\n");
3489
ERRMSG("mmd[%d].pfn_end = %llx\n", i, mmd[i].pfn_end);
3490
ERRMSG("mmd[%d].pfn_start = %llx\n", i + 1, mmd[i + 1].pfn_start);
3492
} else if (mmd[i].pfn_end == mmd[i + 1].pfn_start)
3494
* Continuous mem_map
3499
* Discontinuous mem_map
3501
info->num_mem_map++;
3503
if (mmd[num_mem_map - 1].pfn_end < info->max_mapnr)
3504
info->num_mem_map++;
3506
if ((info->mem_map_data = (struct mem_map_data *)
3507
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
3508
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
3514
* Create mem_map data.
3517
if (mmd[0].pfn_start != 0) {
3518
dump_mem_map(0, mmd[0].pfn_start, NOT_MEMMAP_ADDR, id_mm);
3521
for (i = 0; i < num_mem_map; i++) {
3522
dump_mem_map(mmd[i].pfn_start, mmd[i].pfn_end,
3523
mmd[i].mem_map, id_mm);
3525
if ((i < num_mem_map - 1)
3526
&& (mmd[i].pfn_end != mmd[i + 1].pfn_start)) {
3527
dump_mem_map(mmd[i].pfn_end, mmd[i +1].pfn_start,
3528
NOT_MEMMAP_ADDR, id_mm);
3532
i = num_mem_map - 1;
3533
if (vt.mem_flags & MEMORY_XEN) {
3534
if (mmd[i].pfn_end < info->dom0_mapnr)
3535
dump_mem_map(mmd[i].pfn_end, info->dom0_mapnr,
3536
NOT_MEMMAP_ADDR, id_mm);
3538
if (mmd[i].pfn_end < info->max_mapnr)
3539
dump_mem_map(mmd[i].pfn_end, info->max_mapnr,
3540
NOT_MEMMAP_ADDR, id_mm);
3546
nr_to_section(unsigned long nr, unsigned long *mem_sec)
3550
if (!is_kvaddr(mem_sec[SECTION_NR_TO_ROOT(nr)]))
3553
if (is_sparsemem_extreme())
3554
addr = mem_sec[SECTION_NR_TO_ROOT(nr)] +
3555
(nr & SECTION_ROOT_MASK()) * SIZE(mem_section);
3557
addr = SYMBOL(mem_section) + (nr * SIZE(mem_section));
3559
if (!is_kvaddr(addr))
3566
section_mem_map_addr(unsigned long addr)
3571
if (!is_kvaddr(addr))
3574
if ((mem_section = malloc(SIZE(mem_section))) == NULL) {
3575
ERRMSG("Can't allocate memory for a struct mem_section. %s\n",
3579
if (!readmem(VADDR, addr, mem_section, SIZE(mem_section))) {
3580
ERRMSG("Can't get a struct mem_section(%lx).\n", addr);
3584
map = ULONG(mem_section + OFFSET(mem_section.section_mem_map));
3585
map &= SECTION_MAP_MASK;
3592
sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long section_nr)
3594
if (!is_kvaddr(coded_mem_map))
3597
return coded_mem_map +
3598
(SECTION_NR_TO_PFN(section_nr) * SIZE(page));
3602
get_mm_sparsemem(void)
3604
unsigned int section_nr, mem_section_size, num_section;
3605
unsigned long long pfn_start, pfn_end;
3606
unsigned long section, mem_map;
3607
unsigned long *mem_sec = NULL;
3612
* Get the address of the symbol "mem_section".
3614
num_section = divideup(info->max_mapnr, PAGES_PER_SECTION());
3615
if (is_sparsemem_extreme()) {
3616
info->sections_per_root = _SECTIONS_PER_ROOT_EXTREME();
3617
mem_section_size = sizeof(void *) * NR_SECTION_ROOTS();
3619
info->sections_per_root = _SECTIONS_PER_ROOT();
3620
mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS();
3622
if ((mem_sec = malloc(mem_section_size)) == NULL) {
3623
ERRMSG("Can't allocate memory for the mem_section. %s\n",
3627
if (!readmem(VADDR, SYMBOL(mem_section), mem_sec,
3628
mem_section_size)) {
3629
ERRMSG("Can't get the address of mem_section.\n");
3632
info->num_mem_map = num_section;
3633
if ((info->mem_map_data = (struct mem_map_data *)
3634
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
3635
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
3639
for (section_nr = 0; section_nr < num_section; section_nr++) {
3640
section = nr_to_section(section_nr, mem_sec);
3641
mem_map = section_mem_map_addr(section);
3642
mem_map = sparse_decode_mem_map(mem_map, section_nr);
3643
if (!is_kvaddr(mem_map))
3644
mem_map = NOT_MEMMAP_ADDR;
3645
pfn_start = section_nr * PAGES_PER_SECTION();
3646
pfn_end = pfn_start + PAGES_PER_SECTION();
3647
if (info->max_mapnr < pfn_end)
3648
pfn_end = info->max_mapnr;
3649
dump_mem_map(pfn_start, pfn_end, mem_map, section_nr);
3653
if (mem_sec != NULL)
3660
get_mem_map_without_mm(void)
3662
info->num_mem_map = 1;
3663
if ((info->mem_map_data = (struct mem_map_data *)
3664
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
3665
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
3669
if (vt.mem_flags & MEMORY_XEN)
3670
dump_mem_map(0, info->dom0_mapnr, NOT_MEMMAP_ADDR, 0);
3672
dump_mem_map(0, info->max_mapnr, NOT_MEMMAP_ADDR, 0);
3682
if (vt.mem_flags & MEMORY_XEN) {
3683
if (!get_dom0_mapnr()) {
3684
ERRMSG("Can't domain-0 pfn.\n");
3687
DEBUG_MSG("domain-0 pfn : %llx\n", info->dom0_mapnr);
3690
switch (get_mem_type()) {
3693
DEBUG_MSG("Memory type : SPARSEMEM\n");
3695
ret = get_mm_sparsemem();
3699
DEBUG_MSG("Memory type : SPARSEMEM_EX\n");
3701
ret = get_mm_sparsemem();
3705
DEBUG_MSG("Memory type : DISCONTIGMEM\n");
3707
ret = get_mm_discontigmem();
3711
DEBUG_MSG("Memory type : FLATMEM\n");
3713
ret = get_mm_flatmem();
3716
ERRMSG("Can't distinguish the memory type.\n");
3724
initialize_bitmap_memory(void)
3726
struct disk_dump_header *dh;
3727
struct dump_bitmap *bmp;
3728
off_t bitmap_offset;
3729
int bitmap_len, max_sect_len;
3734
dh = info->dh_memory;
3735
block_size = dh->block_size;
3738
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size) * block_size;
3739
bitmap_len = block_size * dh->bitmap_blocks;
3741
bmp = malloc(sizeof(struct dump_bitmap));
3743
ERRMSG("Can't allocate memory for the memory-bitmap. %s\n",
3747
bmp->fd = info->fd_memory;
3748
bmp->file_name = info->name_memory;
3750
memset(bmp->buf, 0, BUFSIZE_BITMAP);
3751
bmp->offset = bitmap_offset + bitmap_len / 2;
3752
info->bitmap_memory = bmp;
3754
max_sect_len = divideup(dh->max_mapnr, BITMAP_SECT_LEN);
3755
info->valid_pages = calloc(sizeof(ulong), max_sect_len);
3756
if (info->valid_pages == NULL) {
3757
ERRMSG("Can't allocate memory for the valid_pages. %s\n",
3762
for (i = 1, pfn = 0; i < max_sect_len; i++) {
3763
info->valid_pages[i] = info->valid_pages[i - 1];
3764
for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++)
3765
if (is_dumpable(info->bitmap_memory, pfn))
3766
info->valid_pages[i]++;
3775
if (!(vt.mem_flags & MEMORY_XEN) && info->flag_exclude_xen_dom) {
3776
MSG("'-X' option is disable,");
3777
MSG("because %s is not Xen's memory core image.\n", info->name_memory);
3778
MSG("Commandline parameter is invalid.\n");
3779
MSG("Try `makedumpfile --help' for more information.\n");
3783
if (info->flag_refiltering) {
3784
if (info->flag_elf_dumpfile) {
3785
MSG("'-E' option is disable, ");
3786
MSG("because %s is kdump compressed format.\n",
3790
info->phys_base = info->kh_memory->phys_base;
3791
info->max_dump_level |= info->kh_memory->dump_level;
3793
if (!initialize_bitmap_memory())
3796
} else if (!get_phys_base())
3800
* Get the debug information for analysis from the vmcoreinfo file
3802
if (info->flag_read_vmcoreinfo) {
3803
if (!read_vmcoreinfo())
3807
* Get the debug information for analysis from the kernel file
3809
} else if (info->name_vmlinux) {
3810
dwarf_info.fd_debuginfo = info->fd_vmlinux;
3811
dwarf_info.name_debuginfo = info->name_vmlinux;
3813
if (!get_symbol_info())
3816
if (!get_structure_info())
3819
if (!get_srcfile_info())
3823
* Check whether /proc/vmcore contains vmcoreinfo,
3824
* and get both the offset and the size.
3826
if (!info->offset_vmcoreinfo || !info->size_vmcoreinfo) {
3827
if (info->max_dump_level <= DL_EXCLUDE_ZERO)
3830
MSG("%s doesn't contain vmcoreinfo.\n",
3832
MSG("Specify '-x' option or '-i' option.\n");
3833
MSG("Commandline parameter is invalid.\n");
3834
MSG("Try `makedumpfile --help' for more information.\n");
3840
* Get the debug information from /proc/vmcore.
3841
* NOTE: Don't move this code to the above, because the debugging
3842
* information token by -x/-i option is overwritten by vmcoreinfo
3843
* in /proc/vmcore. vmcoreinfo in /proc/vmcore is more reliable
3844
* than -x/-i option.
3846
if (info->offset_vmcoreinfo && info->size_vmcoreinfo) {
3847
if (!read_vmcoreinfo_from_vmcore(info->offset_vmcoreinfo,
3848
info->size_vmcoreinfo, FALSE))
3852
if (!get_value_for_old_linux())
3855
if (!info->page_size) {
3857
* If we cannot get page_size from a vmcoreinfo file,
3858
* fall back to the current kernel page size.
3860
if (!fallback_to_current_page_size())
3863
if (!get_max_mapnr())
3866
if ((info->max_dump_level <= DL_EXCLUDE_ZERO) && !info->flag_dmesg) {
3868
* The debugging information is unnecessary, because the memory
3869
* management system will not be analazed.
3871
if (!get_mem_map_without_mm())
3877
if (!get_machdep_info())
3880
if (!check_release())
3883
if (!get_versiondep_info())
3886
if (!get_numnodes())
3896
initialize_bitmap(struct dump_bitmap *bitmap)
3898
bitmap->fd = info->fd_bitmap;
3899
bitmap->file_name = info->name_bitmap;
3900
bitmap->no_block = -1;
3901
memset(bitmap->buf, 0, BUFSIZE_BITMAP);
3905
initialize_1st_bitmap(struct dump_bitmap *bitmap)
3907
initialize_bitmap(bitmap);
3912
initialize_2nd_bitmap(struct dump_bitmap *bitmap)
3914
initialize_bitmap(bitmap);
3915
bitmap->offset = info->len_bitmap / 2;
3919
set_bitmap(struct dump_bitmap *bitmap, unsigned long long pfn,
3923
off_t old_offset, new_offset;
3924
old_offset = bitmap->offset + BUFSIZE_BITMAP * bitmap->no_block;
3925
new_offset = bitmap->offset + BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);
3927
if (0 <= bitmap->no_block && old_offset != new_offset) {
3928
if (lseek(bitmap->fd, old_offset, SEEK_SET) < 0 ) {
3929
ERRMSG("Can't seek the bitmap(%s). %s\n",
3930
bitmap->file_name, strerror(errno));
3933
if (write(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP)
3934
!= BUFSIZE_BITMAP) {
3935
ERRMSG("Can't write the bitmap(%s). %s\n",
3936
bitmap->file_name, strerror(errno));
3940
if (old_offset != new_offset) {
3941
if (lseek(bitmap->fd, new_offset, SEEK_SET) < 0 ) {
3942
ERRMSG("Can't seek the bitmap(%s). %s\n",
3943
bitmap->file_name, strerror(errno));
3946
if (read(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP)
3947
!= BUFSIZE_BITMAP) {
3948
ERRMSG("Can't read the bitmap(%s). %s\n",
3949
bitmap->file_name, strerror(errno));
3952
bitmap->no_block = pfn / PFN_BUFBITMAP;
3955
* If val is 0, clear bit on the bitmap.
3957
byte = (pfn%PFN_BUFBITMAP)>>3;
3958
bit = (pfn%PFN_BUFBITMAP) & 7;
3960
bitmap->buf[byte] |= 1<<bit;
3962
bitmap->buf[byte] &= ~(1<<bit);
3968
sync_bitmap(struct dump_bitmap *bitmap)
3971
offset = bitmap->offset + BUFSIZE_BITMAP * bitmap->no_block;
3974
* The bitmap buffer is not dirty, and it is not necessary
3977
if (bitmap->no_block < 0)
3980
if (lseek(bitmap->fd, offset, SEEK_SET) < 0 ) {
3981
ERRMSG("Can't seek the bitmap(%s). %s\n",
3982
bitmap->file_name, strerror(errno));
3985
if (write(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP)
3986
!= BUFSIZE_BITMAP) {
3987
ERRMSG("Can't write the bitmap(%s). %s\n",
3988
bitmap->file_name, strerror(errno));
3995
sync_1st_bitmap(void)
3997
return sync_bitmap(info->bitmap1);
4001
sync_2nd_bitmap(void)
4003
return sync_bitmap(info->bitmap2);
4007
set_bit_on_1st_bitmap(unsigned long long pfn)
4009
return set_bitmap(info->bitmap1, pfn, 1);
4013
clear_bit_on_2nd_bitmap(unsigned long long pfn)
4015
return set_bitmap(info->bitmap2, pfn, 0);
4019
clear_bit_on_2nd_bitmap_for_kernel(unsigned long long pfn)
4021
unsigned long long maddr;
4023
if (vt.mem_flags & MEMORY_XEN) {
4024
maddr = ptom_xen(pfn_to_paddr(pfn));
4025
if (maddr == NOT_PADDR) {
4026
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
4030
pfn = paddr_to_pfn(maddr);
4032
return clear_bit_on_2nd_bitmap(pfn);
4036
is_on(char *bitmap, int i)
4038
return bitmap[i>>3] & (1 << (i & 7));
4042
is_dumpable(struct dump_bitmap *bitmap, unsigned long long pfn)
4045
if (pfn == 0 || bitmap->no_block != pfn/PFN_BUFBITMAP) {
4046
offset = bitmap->offset + BUFSIZE_BITMAP*(pfn/PFN_BUFBITMAP);
4047
lseek(bitmap->fd, offset, SEEK_SET);
4048
read(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP);
4050
bitmap->no_block = 0;
4052
bitmap->no_block = pfn/PFN_BUFBITMAP;
4054
return is_on(bitmap->buf, pfn%PFN_BUFBITMAP);
4058
is_in_segs(unsigned long long paddr)
4060
if (info->flag_refiltering) {
4061
static struct dump_bitmap bitmap1 = {0};
4063
if (bitmap1.fd == 0)
4064
initialize_1st_bitmap(&bitmap1);
4066
return is_dumpable(&bitmap1, paddr_to_pfn(paddr));
4069
if (paddr_to_offset(paddr))
4076
is_zero_page(unsigned char *buf, long page_size)
4080
for (i = 0; i < page_size; i++)
4087
read_cache(struct cache_data *cd)
4089
const off_t failed = (off_t)-1;
4091
if (lseek(cd->fd, cd->offset, SEEK_SET) == failed) {
4092
ERRMSG("Can't seek the dump file(%s). %s\n",
4093
cd->file_name, strerror(errno));
4096
if (read(cd->fd, cd->buf, cd->cache_size) != cd->cache_size) {
4097
ERRMSG("Can't read the dump file(%s). %s\n",
4098
cd->file_name, strerror(errno));
4101
cd->offset += cd->cache_size;
4110
if (*(char *)&i == 0x12)
4117
write_and_check_space(int fd, void *buf, size_t buf_size, char *file_name)
4119
int status, written_size = 0;
4121
while (written_size < buf_size) {
4122
status = write(fd, buf + written_size,
4123
buf_size - written_size);
4125
written_size += status;
4128
if (errno == ENOSPC)
4129
info->flag_nospace = TRUE;
4130
MSG("\nCan't write the dump file(%s). %s\n",
4131
file_name, strerror(errno));
4138
write_buffer(int fd, off_t offset, void *buf, size_t buf_size, char *file_name)
4140
struct makedumpfile_data_header fdh;
4141
const off_t failed = (off_t)-1;
4143
if (fd == STDOUT_FILENO) {
4145
* Output a header of flattened format instead of
4146
* lseek(). For sending dump data to a different
4147
* architecture, change the values to big endian.
4149
if (is_bigendian()){
4150
fdh.offset = offset;
4151
fdh.buf_size = buf_size;
4153
fdh.offset = bswap_64(offset);
4154
fdh.buf_size = bswap_64(buf_size);
4156
if (!write_and_check_space(fd, &fdh, sizeof(fdh), file_name))
4159
if (lseek(fd, offset, SEEK_SET) == failed) {
4160
ERRMSG("Can't seek the dump file(%s). %s\n",
4161
file_name, strerror(errno));
4165
if (!write_and_check_space(fd, buf, buf_size, file_name))
4172
write_cache(struct cache_data *cd, void *buf, size_t size)
4174
memcpy(cd->buf + cd->buf_size, buf, size);
4175
cd->buf_size += size;
4177
if (cd->buf_size < cd->cache_size)
4180
if (!write_buffer(cd->fd, cd->offset, cd->buf, cd->cache_size,
4184
cd->buf_size -= cd->cache_size;
4185
memcpy(cd->buf, cd->buf + cd->cache_size, cd->buf_size);
4186
cd->offset += cd->cache_size;
4191
write_cache_bufsz(struct cache_data *cd)
4196
if (!write_buffer(cd->fd, cd->offset, cd->buf, cd->buf_size,
4200
cd->offset += cd->buf_size;
4206
read_buf_from_stdin(void *buf, int buf_size)
4208
int read_size = 0, tmp_read_size = 0;
4209
time_t last_time, tm;
4211
last_time = time(NULL);
4213
while (read_size != buf_size) {
4215
tmp_read_size = read(STDIN_FILENO, buf + read_size,
4216
buf_size - read_size);
4218
if (tmp_read_size < 0) {
4219
ERRMSG("Can't read STDIN. %s\n", strerror(errno));
4222
} else if (0 == tmp_read_size) {
4224
* If it cannot get any data from a standard input
4225
* for a long time, break this loop.
4228
if (TIMEOUT_STDIN < (tm - last_time)) {
4229
ERRMSG("Can't get any data from STDIN.\n");
4233
read_size += tmp_read_size;
4234
last_time = time(NULL);
4241
read_start_flat_header(void)
4243
char buf[MAX_SIZE_MDF_HEADER];
4244
struct makedumpfile_header fh;
4249
if (!read_buf_from_stdin(buf, MAX_SIZE_MDF_HEADER)) {
4250
ERRMSG("Can't get header of flattened format.\n");
4253
memcpy(&fh, buf, sizeof(fh));
4255
if (!is_bigendian()){
4256
fh.type = bswap_64(fh.type);
4257
fh.version = bswap_64(fh.version);
4261
* Check flat header.
4263
if (strcmp(fh.signature, MAKEDUMPFILE_SIGNATURE)) {
4264
ERRMSG("Can't get signature of flattened format.\n");
4267
if (fh.type != TYPE_FLAT_HEADER) {
4268
ERRMSG("Can't get type of flattened format.\n");
4276
read_flat_data_header(struct makedumpfile_data_header *fdh)
4278
if (!read_buf_from_stdin(fdh,
4279
sizeof(struct makedumpfile_data_header))) {
4280
ERRMSG("Can't get header of flattened format.\n");
4283
if (!is_bigendian()){
4284
fdh->offset = bswap_64(fdh->offset);
4285
fdh->buf_size = bswap_64(fdh->buf_size);
4291
rearrange_dumpdata(void)
4293
int read_size, tmp_read_size;
4294
char buf[SIZE_BUF_STDIN];
4295
struct makedumpfile_data_header fdh;
4300
if (!read_start_flat_header()) {
4301
ERRMSG("Can't get header of flattened format.\n");
4306
* Read the first data header.
4308
if (!read_flat_data_header(&fdh)) {
4309
ERRMSG("Can't get header of flattened format.\n");
4315
while (read_size < fdh.buf_size) {
4316
if (sizeof(buf) < (fdh.buf_size - read_size))
4317
tmp_read_size = sizeof(buf);
4319
tmp_read_size = fdh.buf_size - read_size;
4321
if (!read_buf_from_stdin(buf, tmp_read_size)) {
4322
ERRMSG("Can't get data of flattened format.\n");
4325
if (!write_buffer(info->fd_dumpfile,
4326
fdh.offset + read_size, buf, tmp_read_size,
4327
info->name_dumpfile))
4330
read_size += tmp_read_size;
4333
* Read the next header.
4335
if (!read_flat_data_header(&fdh)) {
4336
ERRMSG("Can't get data header of flattened format.\n");
4340
} while ((0 <= fdh.offset) && (0 < fdh.buf_size));
4342
if ((fdh.offset != END_FLAG_FLAT_HEADER)
4343
|| (fdh.buf_size != END_FLAG_FLAT_HEADER)) {
4344
ERRMSG("Can't get valid end header of flattened format.\n");
4352
* Same as paddr_to_offset() but makes sure that the specified offset (hint)
4356
paddr_to_offset2(unsigned long long paddr, off_t hint)
4360
unsigned long long len;
4361
struct pt_load_segment *pls;
4363
for (i = offset = 0; i < info->num_load_memory; i++) {
4364
pls = &info->pt_load_segments[i];
4365
len = pls->phys_end - pls->phys_start;
4366
if ((paddr >= pls->phys_start)
4367
&& (paddr < pls->phys_end)
4368
&& (hint >= pls->file_offset)
4369
&& (hint < pls->file_offset + len)) {
4370
offset = (off_t)(paddr - pls->phys_start) +
4379
page_to_pfn(unsigned long page)
4382
unsigned long long pfn = 0, index = 0;
4383
struct mem_map_data *mmd;
4385
mmd = info->mem_map_data;
4386
for (num = 0; num < info->num_mem_map; num++, mmd++) {
4387
if (mmd->mem_map == NOT_MEMMAP_ADDR)
4389
if (page < mmd->mem_map)
4391
index = (page - mmd->mem_map) / SIZE(page);
4392
if (index > mmd->pfn_end - mmd->pfn_start)
4394
pfn = mmd->pfn_start + index;
4398
ERRMSG("Can't convert the address of page descriptor (%lx) to pfn.\n", page);
4399
return ULONGLONG_MAX;
4405
reset_bitmap_of_free_pages(unsigned long node_zones)
4408
int order, i, migrate_type, migrate_types;
4409
unsigned long curr, previous, head, curr_page, curr_prev;
4410
unsigned long addr_free_pages, free_pages = 0, found_free_pages = 0;
4411
unsigned long long pfn, start_pfn;
4414
* On linux-2.6.24 or later, free_list is divided into the array.
4416
migrate_types = ARRAY_LENGTH(free_area.free_list);
4417
if (migrate_types == NOT_FOUND_STRUCTURE)
4420
for (order = (ARRAY_LENGTH(zone.free_area) - 1); order >= 0; --order) {
4421
for (migrate_type = 0; migrate_type < migrate_types;
4423
head = node_zones + OFFSET(zone.free_area)
4424
+ SIZE(free_area) * order
4425
+ OFFSET(free_area.free_list)
4426
+ SIZE(list_head) * migrate_type;
4428
if (!readmem(VADDR, head + OFFSET(list_head.next),
4429
&curr, sizeof curr)) {
4430
ERRMSG("Can't get next list_head.\n");
4433
for (;curr != head;) {
4434
curr_page = curr - OFFSET(page.lru);
4435
start_pfn = page_to_pfn(curr_page);
4436
if (start_pfn == ULONGLONG_MAX)
4439
if (!readmem(VADDR, curr+OFFSET(list_head.prev),
4440
&curr_prev, sizeof curr_prev)) {
4441
ERRMSG("Can't get prev list_head.\n");
4444
if (previous != curr_prev) {
4445
ERRMSG("The free list is broken.\n");
4446
retcd = ANALYSIS_FAILED;
4449
for (i = 0; i < (1<<order); i++) {
4450
pfn = start_pfn + i;
4451
clear_bit_on_2nd_bitmap_for_kernel(pfn);
4453
found_free_pages += i;
4456
if (!readmem(VADDR, curr+OFFSET(list_head.next),
4457
&curr, sizeof curr)) {
4458
ERRMSG("Can't get next list_head.\n");
4466
* Check the number of free pages.
4468
if (OFFSET(zone.free_pages) != NOT_FOUND_STRUCTURE) {
4469
addr_free_pages = node_zones + OFFSET(zone.free_pages);
4471
} else if (OFFSET(zone.vm_stat) != NOT_FOUND_STRUCTURE) {
4473
* On linux-2.6.21 or later, the number of free_pages is
4474
* in vm_stat[NR_FREE_PAGES].
4476
addr_free_pages = node_zones + OFFSET(zone.vm_stat)
4477
+ sizeof(long) * NUMBER(NR_FREE_PAGES);
4480
ERRMSG("Can't get addr_free_pages.\n");
4483
if (!readmem(VADDR, addr_free_pages, &free_pages, sizeof free_pages)) {
4484
ERRMSG("Can't get free_pages.\n");
4487
if (free_pages != found_free_pages) {
4489
* On linux-2.6.21 or later, the number of free_pages is
4490
* sometimes different from the one of the list "free_area",
4491
* because the former is flushed asynchronously.
4493
DEBUG_MSG("The number of free_pages is invalid.\n");
4494
DEBUG_MSG(" free_pages = %ld\n", free_pages);
4495
DEBUG_MSG(" found_free_pages = %ld\n", found_free_pages);
4497
pfn_free += found_free_pages;
4505
int log_buf_len, length_log, length_oldlog, ret = FALSE;
4506
unsigned long log_buf, log_end, index;
4507
char *log_buffer = NULL;
4509
if (!open_files_for_creating_dumpfile())
4512
if (!info->flag_refiltering) {
4513
if (!get_elf_info())
4519
if ((SYMBOL(log_buf) == NOT_FOUND_SYMBOL)
4520
|| (SYMBOL(log_buf_len) == NOT_FOUND_SYMBOL)
4521
|| (SYMBOL(log_end) == NOT_FOUND_SYMBOL)) {
4522
ERRMSG("Can't find some symbols for log_buf.\n");
4525
if (!readmem(VADDR, SYMBOL(log_buf), &log_buf, sizeof(log_buf))) {
4526
ERRMSG("Can't get log_buf.\n");
4529
if (!readmem(VADDR, SYMBOL(log_end), &log_end, sizeof(log_end))) {
4530
ERRMSG("Can't to get log_end.\n");
4533
if (!readmem(VADDR, SYMBOL(log_buf_len), &log_buf_len,
4534
sizeof(log_buf_len))) {
4535
ERRMSG("Can't get log_buf_len.\n");
4539
DEBUG_MSG("log_buf : %lx\n", log_buf);
4540
DEBUG_MSG("log_end : %lx\n", log_end);
4541
DEBUG_MSG("log_buf_len : %d\n", log_buf_len);
4543
if ((log_buffer = malloc(log_buf_len)) == NULL) {
4544
ERRMSG("Can't allocate memory for log_buf. %s\n",
4549
if (log_end < log_buf_len) {
4550
length_log = log_end;
4551
if(!readmem(VADDR, log_buf, log_buffer, length_log)) {
4552
ERRMSG("Can't read dmesg log.\n");
4556
index = log_end & (log_buf_len - 1);
4557
DEBUG_MSG("index : %lx\n", index);
4558
length_log = log_buf_len;
4559
length_oldlog = log_buf_len - index;
4560
if(!readmem(VADDR, log_buf + index, log_buffer, length_oldlog)) {
4561
ERRMSG("Can't read old dmesg log.\n");
4564
if(!readmem(VADDR, log_buf, log_buffer + length_oldlog, index)) {
4565
ERRMSG("Can't read new dmesg log.\n");
4569
DEBUG_MSG("length_log : %d\n", length_log);
4571
if (!open_dump_file()) {
4572
ERRMSG("Can't open output file.\n");
4575
if (write(info->fd_dumpfile, log_buffer, length_log) < 0)
4578
if (!close_files_for_creating_dumpfile())
4591
_exclude_free_page(void)
4593
int i, nr_zones, num_nodes, node;
4594
unsigned long node_zones, zone, spanned_pages, pgdat;
4596
if ((node = next_online_node(0)) < 0) {
4597
ERRMSG("Can't get next online node.\n");
4600
if (!(pgdat = next_online_pgdat(node))) {
4601
ERRMSG("Can't get pgdat list.\n");
4604
for (num_nodes = 1; num_nodes <= vt.numnodes; num_nodes++) {
4606
print_progress(PROGRESS_FREE_PAGES, num_nodes - 1, vt.numnodes);
4608
node_zones = pgdat + OFFSET(pglist_data.node_zones);
4610
if (!readmem(VADDR, pgdat + OFFSET(pglist_data.nr_zones),
4611
&nr_zones, sizeof(nr_zones))) {
4612
ERRMSG("Can't get nr_zones.\n");
4616
for (i = 0; i < nr_zones; i++) {
4618
print_progress(PROGRESS_FREE_PAGES, i + nr_zones * (num_nodes - 1),
4619
nr_zones * vt.numnodes);
4621
zone = node_zones + (i * SIZE(zone));
4622
if (!readmem(VADDR, zone + OFFSET(zone.spanned_pages),
4623
&spanned_pages, sizeof spanned_pages)) {
4624
ERRMSG("Can't get spanned_pages.\n");
4629
if (!reset_bitmap_of_free_pages(zone))
4632
if (num_nodes < vt.numnodes) {
4633
if ((node = next_online_node(node + 1)) < 0) {
4634
ERRMSG("Can't get next online node.\n");
4636
} else if (!(pgdat = next_online_pgdat(node))) {
4637
ERRMSG("Can't determine pgdat list (node %d).\n",
4647
print_progress(PROGRESS_FREE_PAGES, vt.numnodes, vt.numnodes);
4653
exclude_free_page(void)
4656
* Check having necessary information.
4658
if ((SYMBOL(node_data) == NOT_FOUND_SYMBOL)
4659
&& (SYMBOL(pgdat_list) == NOT_FOUND_SYMBOL)
4660
&& (SYMBOL(contig_page_data) == NOT_FOUND_SYMBOL)) {
4661
ERRMSG("Can't get necessary symbols for excluding free pages.\n");
4664
if ((SIZE(zone) == NOT_FOUND_STRUCTURE)
4665
|| ((OFFSET(zone.free_pages) == NOT_FOUND_STRUCTURE)
4666
&& (OFFSET(zone.vm_stat) == NOT_FOUND_STRUCTURE))
4667
|| (OFFSET(zone.free_area) == NOT_FOUND_STRUCTURE)
4668
|| (OFFSET(zone.spanned_pages) == NOT_FOUND_STRUCTURE)
4669
|| (OFFSET(pglist_data.node_zones) == NOT_FOUND_STRUCTURE)
4670
|| (OFFSET(pglist_data.nr_zones) == NOT_FOUND_STRUCTURE)
4671
|| (SIZE(free_area) == NOT_FOUND_STRUCTURE)
4672
|| (OFFSET(free_area.free_list) == NOT_FOUND_STRUCTURE)
4673
|| (OFFSET(list_head.next) == NOT_FOUND_STRUCTURE)
4674
|| (OFFSET(list_head.prev) == NOT_FOUND_STRUCTURE)
4675
|| (OFFSET(page.lru) == NOT_FOUND_STRUCTURE)
4676
|| (ARRAY_LENGTH(zone.free_area) == NOT_FOUND_STRUCTURE)) {
4677
ERRMSG("Can't get necessary structures for excluding free pages.\n");
4682
* Detect free pages and update 2nd-bitmap.
4684
if (!_exclude_free_page())
4691
* If using a dumpfile in kdump-compressed format as a source file
4692
* instead of /proc/vmcore, 1st-bitmap of a new dumpfile must be
4693
* the same as the one of a source file.
4696
copy_1st_bitmap_from_memory(void)
4698
char buf[info->dh_memory->block_size];
4700
off_t bitmap_offset;
4701
struct disk_dump_header *dh = info->dh_memory;
4703
bitmap_offset = (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size)
4706
if (lseek(info->fd_memory, bitmap_offset, SEEK_SET) < 0) {
4707
ERRMSG("Can't seek %s. %s\n",
4708
info->name_memory, strerror(errno));
4711
if (lseek(info->bitmap1->fd, info->bitmap1->offset, SEEK_SET) < 0) {
4712
ERRMSG("Can't seek the bitmap(%s). %s\n",
4713
info->bitmap1->file_name, strerror(errno));
4717
while (offset_page < (info->len_bitmap / 2)) {
4718
if (read(info->fd_memory, buf, sizeof(buf)) != sizeof(buf)) {
4719
ERRMSG("Can't read %s. %s\n",
4720
info->name_memory, strerror(errno));
4723
if (write(info->bitmap1->fd, buf, sizeof(buf)) != sizeof(buf)) {
4724
ERRMSG("Can't write the bitmap(%s). %s\n",
4725
info->bitmap1->file_name, strerror(errno));
4728
offset_page += sizeof(buf);
4734
create_1st_bitmap(void)
4737
char buf[info->page_size];
4738
unsigned long long pfn, pfn_start, pfn_end, pfn_bitmap1;
4739
struct pt_load_segment *pls;
4742
if (info->flag_refiltering)
4743
return copy_1st_bitmap_from_memory();
4746
* At first, clear all the bits on the 1st-bitmap.
4748
memset(buf, 0, sizeof(buf));
4750
if (lseek(info->bitmap1->fd, info->bitmap1->offset, SEEK_SET) < 0) {
4751
ERRMSG("Can't seek the bitmap(%s). %s\n",
4752
info->bitmap1->file_name, strerror(errno));
4756
while (offset_page < (info->len_bitmap / 2)) {
4757
if (write(info->bitmap1->fd, buf, info->page_size)
4758
!= info->page_size) {
4759
ERRMSG("Can't write the bitmap(%s). %s\n",
4760
info->bitmap1->file_name, strerror(errno));
4763
offset_page += info->page_size;
4767
* If page is on memory hole, set bit on the 1st-bitmap.
4769
for (i = pfn_bitmap1 = 0; i < info->num_load_memory; i++) {
4771
print_progress(PROGRESS_HOLES, i, info->num_load_memory);
4773
pls = &info->pt_load_segments[i];
4774
pfn_start = paddr_to_pfn(pls->phys_start);
4775
pfn_end = paddr_to_pfn(pls->phys_end);
4777
if (!is_in_segs(pfn_to_paddr(pfn_start)))
4779
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
4780
set_bit_on_1st_bitmap(pfn);
4784
pfn_memhole = info->max_mapnr - pfn_bitmap1;
4789
print_progress(PROGRESS_HOLES, info->max_mapnr, info->max_mapnr);
4791
if (!sync_1st_bitmap())
4798
* Exclude the page filled with zero in case of creating an elf dumpfile.
4801
exclude_zero_pages(void)
4803
unsigned long long pfn, paddr;
4804
struct dump_bitmap bitmap2;
4805
unsigned char buf[info->page_size];
4807
initialize_2nd_bitmap(&bitmap2);
4809
for (pfn = paddr = 0; pfn < info->max_mapnr;
4810
pfn++, paddr += info->page_size) {
4812
print_progress(PROGRESS_ZERO_PAGES, pfn, info->max_mapnr);
4814
if (!is_in_segs(paddr))
4817
if (!is_dumpable(&bitmap2, pfn))
4820
if (vt.mem_flags & MEMORY_XEN) {
4821
if (!readmem(MADDR_XEN, paddr, buf, info->page_size)) {
4822
ERRMSG("Can't get the page data(pfn:%llx, max_mapnr:%llx).\n",
4823
pfn, info->max_mapnr);
4827
if (!readmem(PADDR, paddr, buf, info->page_size)) {
4828
ERRMSG("Can't get the page data(pfn:%llx, max_mapnr:%llx).\n",
4829
pfn, info->max_mapnr);
4833
if (is_zero_page(buf, info->page_size)) {
4834
clear_bit_on_2nd_bitmap(pfn);
4842
print_progress(PROGRESS_ZERO_PAGES, info->max_mapnr, info->max_mapnr);
4848
__exclude_unnecessary_pages(unsigned long mem_map,
4849
unsigned long long pfn_start, unsigned long long pfn_end)
4851
unsigned long long pfn, pfn_mm, maddr;
4852
unsigned long long pfn_read_start, pfn_read_end, index_pg;
4853
unsigned char page_cache[SIZE(page) * PGMM_CACHED];
4854
unsigned char *pcache;
4855
unsigned int _count;
4856
unsigned long flags, mapping;
4859
* Refresh the buffer of struct page, when changing mem_map.
4861
pfn_read_start = ULONGLONG_MAX;
4864
for (pfn = pfn_start; pfn < pfn_end; pfn++, mem_map += SIZE(page)) {
4867
* Exclude the memory hole.
4869
if (vt.mem_flags & MEMORY_XEN) {
4870
maddr = ptom_xen(pfn_to_paddr(pfn));
4871
if (maddr == NOT_PADDR) {
4872
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
4876
if (!is_in_segs(maddr))
4879
if (!is_in_segs(pfn_to_paddr(pfn)))
4883
index_pg = pfn % PGMM_CACHED;
4884
if (pfn < pfn_read_start || pfn_read_end < pfn) {
4885
if (roundup(pfn + 1, PGMM_CACHED) < pfn_end)
4886
pfn_mm = PGMM_CACHED - index_pg;
4888
pfn_mm = pfn_end - pfn;
4890
if (!readmem(VADDR, mem_map,
4891
page_cache + (index_pg * SIZE(page)),
4892
SIZE(page) * pfn_mm)) {
4893
ERRMSG("Can't read the buffer of struct page.\n");
4896
pfn_read_start = pfn;
4897
pfn_read_end = pfn + pfn_mm - 1;
4899
pcache = page_cache + (index_pg * SIZE(page));
4901
flags = ULONG(pcache + OFFSET(page.flags));
4902
_count = UINT(pcache + OFFSET(page._count));
4903
mapping = ULONG(pcache + OFFSET(page.mapping));
4906
* Exclude the cache page without the private page.
4908
if ((info->dump_level & DL_EXCLUDE_CACHE)
4909
&& (isLRU(flags) || isSwapCache(flags))
4910
&& !isPrivate(flags) && !isAnon(mapping)) {
4911
clear_bit_on_2nd_bitmap_for_kernel(pfn);
4915
* Exclude the cache page with the private page.
4917
else if ((info->dump_level & DL_EXCLUDE_CACHE_PRI)
4918
&& (isLRU(flags) || isSwapCache(flags))
4919
&& !isAnon(mapping)) {
4920
clear_bit_on_2nd_bitmap_for_kernel(pfn);
4921
pfn_cache_private++;
4924
* Exclude the data page of the user process.
4926
else if ((info->dump_level & DL_EXCLUDE_USER_DATA)
4927
&& isAnon(mapping)) {
4928
clear_bit_on_2nd_bitmap_for_kernel(pfn);
4936
exclude_unnecessary_pages(void)
4939
struct mem_map_data *mmd;
4941
for (mm = 0; mm < info->num_mem_map; mm++) {
4942
print_progress(PROGRESS_UNN_PAGES, mm, info->num_mem_map);
4944
mmd = &info->mem_map_data[mm];
4946
if (mmd->mem_map == NOT_MEMMAP_ADDR)
4949
if (!__exclude_unnecessary_pages(mmd->mem_map,
4950
mmd->pfn_start, mmd->pfn_end))
4957
print_progress(PROGRESS_UNN_PAGES, info->num_mem_map, info->num_mem_map);
4959
if (info->dump_level & DL_EXCLUDE_FREE)
4960
if (!exclude_free_page())
4970
unsigned char buf[info->page_size];
4971
const off_t failed = (off_t)-1;
4974
while (offset < (info->len_bitmap / 2)) {
4975
if (lseek(info->bitmap1->fd, info->bitmap1->offset + offset,
4976
SEEK_SET) == failed) {
4977
ERRMSG("Can't seek the bitmap(%s). %s\n",
4978
info->name_bitmap, strerror(errno));
4981
if (read(info->bitmap1->fd, buf, sizeof(buf)) != sizeof(buf)) {
4982
ERRMSG("Can't read the dump memory(%s). %s\n",
4983
info->name_memory, strerror(errno));
4986
if (lseek(info->bitmap2->fd, info->bitmap2->offset + offset,
4987
SEEK_SET) == failed) {
4988
ERRMSG("Can't seek the bitmap(%s). %s\n",
4989
info->name_bitmap, strerror(errno));
4992
if (write(info->bitmap2->fd, buf, sizeof(buf)) != sizeof(buf)) {
4993
ERRMSG("Can't write the bitmap(%s). %s\n",
4994
info->name_bitmap, strerror(errno));
4997
offset += sizeof(buf);
5004
create_2nd_bitmap(void)
5007
* Copy 1st-bitmap to 2nd-bitmap.
5009
if (!copy_bitmap()) {
5010
ERRMSG("Can't copy 1st-bitmap to 2nd-bitmap.\n");
5015
* Exclude unnecessary pages (free pages, cache pages, etc.)
5017
if (DL_EXCLUDE_ZERO < info->dump_level) {
5018
if (!exclude_unnecessary_pages()) {
5019
ERRMSG("Can't exclude unnecessary pages.\n");
5025
* Exclude Xen user domain.
5027
if (info->flag_exclude_xen_dom) {
5028
if (!exclude_xen_user_domain()) {
5029
ERRMSG("Can't exclude xen user domain.\n");
5035
* Exclude pages filled with zero for creating an ELF dumpfile.
5037
* Note: If creating a kdump-compressed dumpfile, makedumpfile
5038
* checks zero-pages while copying dumpable pages to a
5039
* dumpfile from /proc/vmcore. That is valuable for the
5040
* speed, because each page is read one time only.
5041
* Otherwise (if creating an ELF dumpfile), makedumpfile
5042
* should check zero-pages at this time because 2nd-bitmap
5043
* should be fixed for creating an ELF header. That is slow
5044
* due to reading each page two times, but it is necessary.
5046
if ((info->dump_level & DL_EXCLUDE_ZERO) && info->flag_elf_dumpfile) {
5048
* 2nd-bitmap should be flushed at this time, because
5049
* exclude_zero_pages() checks 2nd-bitmap.
5051
if (!sync_2nd_bitmap())
5054
if (!exclude_zero_pages()) {
5055
ERRMSG("Can't exclude pages filled with zero for creating an ELF dumpfile.\n");
5060
if (!sync_2nd_bitmap())
5067
prepare_bitmap_buffer(void)
5072
* Create 2 bitmaps (1st-bitmap & 2nd-bitmap) on block_size boundary.
5073
* The crash utility requires both of them to be aligned to block_size
5076
tmp = divideup(divideup(info->max_mapnr, BITPERBYTE), info->page_size);
5077
info->len_bitmap = tmp*info->page_size*2;
5080
* Prepare bitmap buffers for creating dump bitmap.
5082
if ((info->bitmap1 = malloc(sizeof(struct dump_bitmap))) == NULL) {
5083
ERRMSG("Can't allocate memory for the 1st-bitmap. %s\n",
5087
if ((info->bitmap2 = malloc(sizeof(struct dump_bitmap))) == NULL) {
5088
ERRMSG("Can't allocate memory for the 2nd-bitmap. %s\n",
5092
initialize_1st_bitmap(info->bitmap1);
5093
initialize_2nd_bitmap(info->bitmap2);
5099
free_bitmap_buffer(void)
5101
if (info->bitmap1) {
5102
free(info->bitmap1);
5103
info->bitmap1 = NULL;
5105
if (info->bitmap2) {
5106
free(info->bitmap2);
5107
info->bitmap2 = NULL;
5114
create_dump_bitmap(void)
5118
if (!prepare_bitmap_buffer())
5121
if (!create_1st_bitmap())
5124
if (!create_2nd_bitmap())
5129
free_bitmap_buffer();
5135
get_phnum_memory(void)
5141
if (info->flag_elf64_memory) { /* ELF64 */
5142
if (!get_elf64_ehdr(&ehdr64)) {
5143
ERRMSG("Can't get ehdr64.\n");
5146
phnum = ehdr64.e_phnum;
5147
} else { /* ELF32 */
5148
if (!get_elf32_ehdr(&ehdr32)) {
5149
ERRMSG("Can't get ehdr32.\n");
5152
phnum = ehdr32.e_phnum;
5159
get_loads_dumpfile(void)
5161
int i, phnum, num_new_load = 0;
5162
long page_size = info->page_size;
5163
unsigned long long pfn, pfn_start, pfn_end, num_excluded;
5164
unsigned long frac_head, frac_tail;
5166
struct dump_bitmap bitmap2;
5168
initialize_2nd_bitmap(&bitmap2);
5170
if (!(phnum = get_phnum_memory()))
5173
for (i = 0; i < phnum; i++) {
5174
if (!get_elf_phdr_memory(i, &load))
5176
if (load.p_type != PT_LOAD)
5179
pfn_start = paddr_to_pfn(load.p_paddr);
5180
pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
5181
frac_head = page_size - (load.p_paddr % page_size);
5182
frac_tail = (load.p_paddr + load.p_memsz) % page_size;
5187
if (frac_head && (frac_head != page_size))
5192
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
5193
if (!is_dumpable(&bitmap2, pfn)) {
5199
* If the number of the contiguous pages to be excluded
5200
* is 256 or more, those pages are excluded really.
5201
* And a new PT_LOAD segment is created.
5203
if (num_excluded >= PFN_EXCLUDED) {
5209
return num_new_load;
5213
prepare_cache_data(struct cache_data *cd)
5215
cd->fd = info->fd_dumpfile;
5216
cd->file_name = info->name_dumpfile;
5217
cd->cache_size = info->page_size << info->block_order;
5221
if ((cd->buf = malloc(cd->cache_size + info->page_size)) == NULL) {
5222
ERRMSG("Can't allocate memory for the data buffer. %s\n",
5230
free_cache_data(struct cache_data *cd)
5237
write_start_flat_header()
5239
char buf[MAX_SIZE_MDF_HEADER];
5240
struct makedumpfile_header fh;
5242
if (!info->flag_flatten)
5245
strcpy(fh.signature, MAKEDUMPFILE_SIGNATURE);
5248
* For sending dump data to a different architecture, change the values
5251
if (is_bigendian()){
5252
fh.type = TYPE_FLAT_HEADER;
5253
fh.version = VERSION_FLAT_HEADER;
5255
fh.type = bswap_64(TYPE_FLAT_HEADER);
5256
fh.version = bswap_64(VERSION_FLAT_HEADER);
5259
memset(buf, 0, sizeof(buf));
5260
memcpy(buf, &fh, sizeof(fh));
5262
if (!write_and_check_space(info->fd_dumpfile, buf, MAX_SIZE_MDF_HEADER,
5263
info->name_dumpfile))
5270
write_end_flat_header(void)
5272
struct makedumpfile_data_header fdh;
5274
if (!info->flag_flatten)
5277
fdh.offset = END_FLAG_FLAT_HEADER;
5278
fdh.buf_size = END_FLAG_FLAT_HEADER;
5280
if (!write_and_check_space(info->fd_dumpfile, &fdh, sizeof(fdh),
5281
info->name_dumpfile))
5288
write_elf_phdr(struct cache_data *cd_hdr, Elf64_Phdr *load)
5292
if (info->flag_elf64_memory) { /* ELF64 */
5293
if (!write_cache(cd_hdr, load, sizeof(Elf64_Phdr)))
5297
memset(&load32, 0, sizeof(Elf32_Phdr));
5298
load32.p_type = load->p_type;
5299
load32.p_flags = load->p_flags;
5300
load32.p_offset = load->p_offset;
5301
load32.p_vaddr = load->p_vaddr;
5302
load32.p_paddr = load->p_paddr;
5303
load32.p_filesz = load->p_filesz;
5304
load32.p_memsz = load->p_memsz;
5305
load32.p_align = load->p_align;
5307
if (!write_cache(cd_hdr, &load32, sizeof(Elf32_Phdr)))
5314
write_elf_header(struct cache_data *cd_header)
5316
int i, num_loads_dumpfile, phnum;
5317
off_t offset_note_memory, offset_note_dumpfile;
5324
const off_t failed = (off_t)-1;
5328
if (!info->flag_elf_dumpfile)
5332
* Get the PT_LOAD number of the dumpfile.
5334
if (!(num_loads_dumpfile = get_loads_dumpfile())) {
5335
ERRMSG("Can't get a number of PT_LOAD.\n");
5339
if (info->flag_elf64_memory) { /* ELF64 */
5340
if (!get_elf64_ehdr(&ehdr64)) {
5341
ERRMSG("Can't get ehdr64.\n");
5345
* PT_NOTE(1) + PT_LOAD(1+)
5347
ehdr64.e_phnum = 1 + num_loads_dumpfile;
5348
} else { /* ELF32 */
5349
if (!get_elf32_ehdr(&ehdr32)) {
5350
ERRMSG("Can't get ehdr32.\n");
5354
* PT_NOTE(1) + PT_LOAD(1+)
5356
ehdr32.e_phnum = 1 + num_loads_dumpfile;
5360
* Write an ELF header.
5362
if (info->flag_elf64_memory) { /* ELF64 */
5363
if (!write_buffer(info->fd_dumpfile, 0, &ehdr64, sizeof(ehdr64),
5364
info->name_dumpfile))
5367
} else { /* ELF32 */
5368
if (!write_buffer(info->fd_dumpfile, 0, &ehdr32, sizeof(ehdr32),
5369
info->name_dumpfile))
5374
* Write a PT_NOTE header.
5376
if (!(phnum = get_phnum_memory()))
5379
for (i = 0; i < phnum; i++) {
5380
if (!get_elf_phdr_memory(i, ¬e))
5382
if (note.p_type == PT_NOTE)
5385
if (note.p_type != PT_NOTE) {
5386
ERRMSG("Can't get a PT_NOTE header.\n");
5390
if (info->flag_elf64_memory) { /* ELF64 */
5391
cd_header->offset = sizeof(ehdr64);
5392
offset_note_dumpfile = sizeof(ehdr64)
5393
+ sizeof(Elf64_Phdr) * ehdr64.e_phnum;
5395
cd_header->offset = sizeof(ehdr32);
5396
offset_note_dumpfile = sizeof(ehdr32)
5397
+ sizeof(Elf32_Phdr) * ehdr32.e_phnum;
5399
offset_note_memory = note.p_offset;
5400
note.p_offset = offset_note_dumpfile;
5401
size_note = note.p_filesz;
5403
if (!write_elf_phdr(cd_header, ¬e))
5407
* Write a PT_NOTE segment.
5408
* PT_LOAD header will be written later.
5410
if ((buf = malloc(size_note)) == NULL) {
5411
ERRMSG("Can't allocate memory for PT_NOTE segment. %s\n",
5415
if (lseek(info->fd_memory, offset_note_memory, SEEK_SET) == failed) {
5416
ERRMSG("Can't seek the dump memory(%s). %s\n",
5417
info->name_memory, strerror(errno));
5420
if (read(info->fd_memory, buf, size_note) != size_note) {
5421
ERRMSG("Can't read the dump memory(%s). %s\n",
5422
info->name_memory, strerror(errno));
5425
if (!write_buffer(info->fd_dumpfile, offset_note_dumpfile, buf,
5426
size_note, info->name_dumpfile))
5430
* Set an offset of PT_LOAD segment.
5432
info->offset_load_dumpfile = offset_note_dumpfile + size_note;
5443
write_kdump_header(void)
5447
struct disk_dump_header *dh = info->dump_header;
5448
struct kdump_sub_header kh;
5451
if (info->flag_elf_dumpfile)
5455
* Write common header
5457
strcpy(dh->signature, KDUMP_SIGNATURE);
5458
dh->header_version = 3;
5459
dh->block_size = info->page_size;
5460
dh->sub_hdr_size = sizeof(kh) + info->size_vmcoreinfo;
5461
dh->sub_hdr_size = divideup(dh->sub_hdr_size, dh->block_size);
5462
dh->max_mapnr = info->max_mapnr;
5464
dh->bitmap_blocks = divideup(info->len_bitmap, dh->block_size);
5465
memcpy(&dh->timestamp, &info->timestamp, sizeof(dh->timestamp));
5466
memcpy(&dh->utsname, &info->system_utsname, sizeof(dh->utsname));
5468
size = sizeof(struct disk_dump_header);
5469
if (!write_buffer(info->fd_dumpfile, 0, dh, size, info->name_dumpfile))
5475
size = sizeof(struct kdump_sub_header);
5476
memset(&kh, 0, size);
5477
kh.phys_base = info->phys_base;
5478
kh.dump_level = info->dump_level;
5479
if (info->flag_split) {
5481
kh.start_pfn = info->split_start_pfn;
5482
kh.end_pfn = info->split_end_pfn;
5484
if (info->offset_vmcoreinfo && info->size_vmcoreinfo) {
5486
* Write vmcoreinfo data
5488
kh.offset_vmcoreinfo
5489
= DISKDUMP_HEADER_BLOCKS * dh->block_size + sizeof(kh);
5490
kh.size_vmcoreinfo = info->size_vmcoreinfo;
5492
buf = malloc(info->size_vmcoreinfo);
5494
ERRMSG("Can't allocate memory for vmcoreinfo. %s\n",
5498
if (lseek(info->fd_memory, info->offset_vmcoreinfo, SEEK_SET)
5500
ERRMSG("Can't seek the dump memory(%s). %s\n",
5501
info->name_memory, strerror(errno));
5504
if (read(info->fd_memory, buf, info->size_vmcoreinfo)
5505
!= info->size_vmcoreinfo) {
5506
ERRMSG("Can't read the dump memory(%s). %s\n",
5507
info->name_memory, strerror(errno));
5510
if (!write_buffer(info->fd_dumpfile, kh.offset_vmcoreinfo, buf,
5511
kh.size_vmcoreinfo, info->name_dumpfile))
5514
if (!write_buffer(info->fd_dumpfile, dh->block_size, &kh,
5515
size, info->name_dumpfile))
5518
info->offset_bitmap1
5519
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size) * dh->block_size;
5530
print_progress(const char *msg, unsigned long current, unsigned long end)
5534
static time_t last_time = 0;
5536
if (current < end) {
5538
if (tm - last_time < 1)
5541
progress = current * 100 / end;
5546
PROGRESS_MSG("%-" PROGRESS_MAXLEN "s: [%3d %%] ", msg, progress);
5550
get_num_dumpable(void)
5552
unsigned long long pfn, num_dumpable;
5553
struct dump_bitmap bitmap2;
5555
initialize_2nd_bitmap(&bitmap2);
5557
for (pfn = 0, num_dumpable = 0; pfn < info->max_mapnr; pfn++) {
5558
if (is_dumpable(&bitmap2, pfn))
5561
return num_dumpable;
5565
write_elf_load_segment(struct cache_data *cd_page, unsigned long long paddr,
5566
off_t off_memory, long long size)
5568
long page_size = info->page_size;
5569
long long bufsz_write;
5570
char buf[info->page_size];
5572
off_memory = paddr_to_offset2(paddr, off_memory);
5574
ERRMSG("Can't convert physaddr(%llx) to an offset.\n",
5578
if (lseek(info->fd_memory, off_memory, SEEK_SET) < 0) {
5579
ERRMSG("Can't seek the dump memory(%s). %s\n",
5580
info->name_memory, strerror(errno));
5585
if (size >= page_size)
5586
bufsz_write = page_size;
5590
if (read(info->fd_memory, buf, bufsz_write) != bufsz_write) {
5591
ERRMSG("Can't read the dump memory(%s). %s\n",
5592
info->name_memory, strerror(errno));
5595
if (!write_cache(cd_page, buf, bufsz_write))
5604
write_elf_pages(struct cache_data *cd_header, struct cache_data *cd_page)
5607
long page_size = info->page_size;
5608
unsigned long long pfn, pfn_start, pfn_end, paddr, num_excluded;
5609
unsigned long long num_dumpable, num_dumped = 0, per;
5610
unsigned long long memsz, filesz;
5611
unsigned long frac_head, frac_tail;
5612
off_t off_seg_load, off_memory;
5614
struct dump_bitmap bitmap2;
5616
if (!info->flag_elf_dumpfile)
5619
initialize_2nd_bitmap(&bitmap2);
5621
num_dumpable = get_num_dumpable();
5622
per = num_dumpable / 100;
5624
off_seg_load = info->offset_load_dumpfile;
5625
cd_page->offset = info->offset_load_dumpfile;
5627
if (!(phnum = get_phnum_memory()))
5630
for (i = 0; i < phnum; i++) {
5631
if (!get_elf_phdr_memory(i, &load))
5634
if (load.p_type != PT_LOAD)
5637
off_memory= load.p_offset;
5638
paddr = load.p_paddr;
5639
pfn_start = paddr_to_pfn(load.p_paddr);
5640
pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
5641
frac_head = page_size - (load.p_paddr % page_size);
5642
frac_tail = (load.p_paddr + load.p_memsz)%page_size;
5647
if (frac_head && (frac_head != page_size)) {
5656
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
5657
if (!is_dumpable(&bitmap2, pfn)) {
5659
if ((pfn == pfn_end - 1) && frac_tail)
5666
if ((num_dumped % per) == 0)
5667
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
5672
* The dumpable pages are continuous.
5674
if (!num_excluded) {
5675
if ((pfn == pfn_end - 1) && frac_tail) {
5677
filesz += frac_tail;
5680
filesz += page_size;
5684
* If the number of the contiguous pages to be excluded
5685
* is 255 or less, those pages are not excluded.
5687
} else if (num_excluded < PFN_EXCLUDED) {
5688
if ((pfn == pfn_end - 1) && frac_tail) {
5690
filesz += (page_size*num_excluded
5694
filesz += (page_size*num_excluded
5702
* If the number of the contiguous pages to be excluded
5703
* is 256 or more, those pages are excluded really.
5704
* And a new PT_LOAD segment is created.
5706
load.p_memsz = memsz;
5707
load.p_filesz = filesz;
5708
load.p_offset = off_seg_load;
5711
* Write a PT_LOAD header.
5713
if (!write_elf_phdr(cd_header, &load))
5717
* Write a PT_LOAD segment.
5719
if (!write_elf_load_segment(cd_page, paddr, off_memory,
5723
load.p_paddr += load.p_memsz;
5727
* (x86) Fill PT_LOAD headers with appropriate
5728
* virtual addresses.
5730
if (load.p_paddr < MAXMEM)
5731
load.p_vaddr += load.p_memsz;
5733
load.p_vaddr += load.p_memsz;
5735
paddr = load.p_paddr;
5736
off_seg_load += load.p_filesz;
5743
* Write the last PT_LOAD.
5745
load.p_memsz = memsz;
5746
load.p_filesz = filesz;
5747
load.p_offset = off_seg_load;
5750
* Write a PT_LOAD header.
5752
if (!write_elf_phdr(cd_header, &load))
5756
* Write a PT_LOAD segment.
5758
if (!write_elf_load_segment(cd_page, paddr, off_memory, load.p_filesz))
5761
off_seg_load += load.p_filesz;
5763
if (!write_cache_bufsz(cd_header))
5765
if (!write_cache_bufsz(cd_page))
5771
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
5778
* This function is specific for reading page.
5780
* If reading the separated page on different PT_LOAD segments,
5781
* this function gets the page data from both segments. This is
5782
* worthy of ia64 /proc/vmcore. In ia64 /proc/vmcore, region 5
5783
* segment is overlapping to region 7 segment. The following is
5784
* example (page_size is 16KBytes):
5786
* region | paddr | memsz
5787
* --------+--------------------+--------------------
5788
* 5 | 0x0000000004000000 | 0x0000000000638ce0
5789
* 7 | 0x0000000004000000 | 0x0000000000db3000
5791
* In the above example, the last page of region 5 is 0x4638000
5792
* and the segment does not contain complete data of this page.
5793
* Then this function gets the data of 0x4638000 - 0x4638ce0
5794
* from region 5, and gets the remaining data from region 7.
5797
read_pfn(unsigned long long pfn, unsigned char *buf)
5799
unsigned long long paddr;
5800
off_t offset1, offset2;
5801
size_t size1, size2;
5803
paddr = pfn_to_paddr(pfn);
5804
if (info->flag_refiltering) {
5805
if (!readmem(PADDR, paddr, buf, info->page_size)) {
5806
ERRMSG("Can't get the page data.\n");
5812
offset1 = paddr_to_offset(paddr);
5813
offset2 = paddr_to_offset(paddr + info->page_size);
5816
* Check the separated page on different PT_LOAD segments.
5818
if (offset1 + info->page_size == offset2) {
5819
size1 = info->page_size;
5821
for (size1 = 1; size1 < info->page_size; size1++) {
5822
offset2 = paddr_to_offset(paddr + size1);
5823
if (offset1 + size1 != offset2)
5827
if (!readmem(PADDR, paddr, buf, size1)) {
5828
ERRMSG("Can't get the page data.\n");
5831
if (size1 != info->page_size) {
5832
size2 = info->page_size - size1;
5834
memset(buf + size1, 0, size2);
5836
if (!readmem(PADDR, paddr + size1, buf + size1, size2)) {
5837
ERRMSG("Can't get the page data.\n");
5846
/*
 * write_kdump_pages - write page descriptors (via cd_header) and page
 * data (via cd_page) in kdump-compressed format, skipping pages cleared
 * in the 2nd bitmap, deduplicating zero pages, and zlib-compressing page
 * data when it shrinks.  (fragmentary — dropped lines; verify upstream)
 */
write_kdump_pages(struct cache_data *cd_header, struct cache_data *cd_page)
5848
unsigned long long pfn, per, num_dumpable, num_dumped = 0;
5849
unsigned long long start_pfn, end_pfn;
5850
unsigned long size_out;
5851
struct page_desc pd, pd_zero;
5852
off_t offset_data = 0;
5853
struct disk_dump_header *dh = info->dump_header;
5854
/* VLA sized by runtime page size of the crashed kernel. */
unsigned char buf[info->page_size], *buf_out = NULL;
5855
unsigned long len_buf_out;
5856
struct dump_bitmap bitmap2;
5857
const off_t failed = (off_t)-1;
5861
if (info->flag_elf_dumpfile)
5864
initialize_2nd_bitmap(&bitmap2);
5866
/* Worst-case compressed size for one page (zlib). */
len_buf_out = compressBound(info->page_size);
5867
if ((buf_out = malloc(len_buf_out)) == NULL) {
5868
ERRMSG("Can't allocate memory for the compression buffer. %s\n",
5873
num_dumpable = get_num_dumpable();
5874
/* 'per' = pages per 1% of progress output. */
per = num_dumpable / 100;
5877
* Calculate the offset of the page data.
5880
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size + dh->bitmap_blocks)
5882
cd_page->offset = cd_header->offset + sizeof(page_desc_t)*num_dumpable;
5883
offset_data = cd_page->offset;
5886
* Set a fileoffset of Physical Address 0x0.
5888
if (lseek(info->fd_memory, info->offset_load_memory, SEEK_SET)
5890
ERRMSG("Can't seek the dump memory(%s). %s\n",
5891
info->name_memory, strerror(errno));
5896
* Write the data of zero-filled page.
5898
/* One shared zero page; all zero pages later reference pd_zero. */
if (info->dump_level & DL_EXCLUDE_ZERO) {
5899
pd_zero.size = info->page_size;
5901
pd_zero.offset = offset_data;
5902
pd_zero.page_flags = 0;
5903
memset(buf, 0, pd_zero.size);
5904
if (!write_cache(cd_page, buf, pd_zero.size))
5906
offset_data += pd_zero.size;
5908
/* Split mode: this process handles only its assigned pfn range. */
if (info->flag_split) {
5909
start_pfn = info->split_start_pfn;
5910
end_pfn = info->split_end_pfn;
5914
end_pfn = info->max_mapnr;
5916
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5918
/* NOTE(review): 'per' may be 0 when num_dumpable < 100 —
 * the visible '% per' would then divide by zero; confirm a
 * guard exists in the dropped lines. */
if ((num_dumped % per) == 0)
5919
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
5922
* Check the excluded page.
5924
if (!is_dumpable(&bitmap2, pfn))
5929
if (!read_pfn(pfn, buf))
5933
* Exclude the page filled with zeros.
5935
if ((info->dump_level & DL_EXCLUDE_ZERO)
5936
&& is_zero_page(buf, info->page_size)) {
5937
/* Zero page: emit only a descriptor pointing at pd_zero. */
if (!write_cache(cd_header, &pd_zero, sizeof(page_desc_t)))
5943
* Compress the page data.
5945
size_out = len_buf_out;
5946
/* Keep the compressed form only if it is strictly smaller. */
if (info->flag_compress
5947
&& (compress2(buf_out, &size_out, buf,
5948
info->page_size, Z_BEST_SPEED) == Z_OK)
5949
&& (size_out < info->page_size)) {
5952
memcpy(buf, buf_out, pd.size);
5955
pd.size = info->page_size;
5958
pd.offset = offset_data;
5959
offset_data += pd.size;
5962
* Write the page header.
5964
if (!write_cache(cd_header, &pd, sizeof(page_desc_t)))
5968
* Write the page data.
5970
if (!write_cache(cd_page, buf, pd.size))
5975
* Write the remainder.
5977
if (!write_cache_bufsz(cd_page))
5979
if (!write_cache_bufsz(cd_header))
5985
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
5990
if (buf_out != NULL)
5997
/*
 * write_kdump_bitmap - copy the dump bitmap from the temporary bitmap
 * file into the dumpfile at offset_bitmap1, in BUFSIZE_BITMAP chunks.
 * (fragmentary — dropped lines; verify against upstream)
 */
write_kdump_bitmap(void)
5999
struct cache_data bm;
6005
/* ELF output has no kdump bitmap section. */
if (info->flag_elf_dumpfile)
6008
bm.fd = info->fd_bitmap;
6009
bm.file_name = info->name_bitmap;
6013
if ((bm.buf = calloc(1, BUFSIZE_BITMAP)) == NULL) {
6014
ERRMSG("Can't allocate memory for dump bitmap buffer. %s\n",
6018
offset = info->offset_bitmap1;
6019
buf_size = info->len_bitmap;
6021
while (buf_size > 0) {
6022
/* Last chunk may be shorter than BUFSIZE_BITMAP. */
if (buf_size >= BUFSIZE_BITMAP)
6023
bm.cache_size = BUFSIZE_BITMAP;
6025
bm.cache_size = buf_size;
6027
if(!read_cache(&bm))
6030
if (!write_buffer(info->fd_dumpfile, offset,
6031
bm.buf, bm.cache_size, info->name_dumpfile))
6034
offset += bm.cache_size;
6035
buf_size -= BUFSIZE_BITMAP;
6046
/* Close the vmcoreinfo FILE*; failure is only logged, not fatal. */
close_vmcoreinfo(void)
6048
if(fclose(info->file_vmcoreinfo) < 0)
6049
ERRMSG("Can't close the vmcoreinfo file(%s). %s\n",
6050
info->name_vmcoreinfo, strerror(errno));
6054
/*
 * Close the /proc/vmcore (dump memory) fd.
 * NOTE(review): close()'s return value is assigned back into
 * info->fd_memory, so on success the field becomes 0 (a valid fd),
 * not an "invalid" marker — looks intentional-but-fragile; confirm
 * callers (e.g. reopen_dump_memory) never test fd_memory afterwards.
 */
close_dump_memory(void)
6056
if ((info->fd_memory = close(info->fd_memory)) < 0)
6057
ERRMSG("Can't close the dump memory(%s). %s\n",
6058
info->name_memory, strerror(errno));
6062
/*
 * Close the output dumpfile fd; skipped in flatten mode (output went
 * to stdout).  Same fd-clobbering pattern as close_dump_memory().
 */
close_dump_file(void)
6064
if (info->flag_flatten)
6067
if ((info->fd_dumpfile = close(info->fd_dumpfile)) < 0)
6068
ERRMSG("Can't close the dump file(%s). %s\n",
6069
info->name_dumpfile, strerror(errno));
6073
/*
 * Close the temporary bitmap file fd and release its name buffer.
 * The name is freed and NULLed so a double close cannot double-free.
 */
close_dump_bitmap(void)
6075
if ((info->fd_bitmap = close(info->fd_bitmap)) < 0)
6076
ERRMSG("Can't close the bitmap file(%s). %s\n",
6077
info->name_bitmap, strerror(errno));
6078
free(info->name_bitmap);
6079
info->name_bitmap = NULL;
6083
/*
 * Close whichever debuginfo file was opened: vmlinux and/or xen-syms.
 * Presence is detected by the name pointer being non-NULL.
 */
close_kernel_file(void)
6085
if (info->name_vmlinux) {
6086
if ((info->fd_vmlinux = close(info->fd_vmlinux)) < 0) {
6087
ERRMSG("Can't close the kernel file(%s). %s\n",
6088
info->name_vmlinux, strerror(errno));
6091
if (info->name_xen_syms) {
6092
if ((info->fd_xen_syms = close(info->fd_xen_syms)) < 0) {
6093
ERRMSG("Can't close the kernel file(%s). %s\n",
6094
info->name_xen_syms, strerror(errno));
6100
* Close the following files when it generates the vmcoreinfo file.
6105
/* Teardown for -g mode: only the kernel debuginfo file needs closing. */
close_files_for_generating_vmcoreinfo(void)
6107
close_kernel_file();
6115
* Close the following file when it rearranges the dump data.
6119
/* Teardown for -R (rearrange) mode.  NOTE(review): body not visible
 * here — lines appear dropped by extraction; restore from upstream. */
close_files_for_rearranging_dumpdata(void)
6127
* Close the following files when it creates the dump file.
6131
* if it reads the vmcoreinfo file
6137
/*
 * Teardown for dumpfile creation: kernel file (only opened when a
 * dump level above "exclude zero" needed debuginfo), vmcoreinfo name,
 * dump memory and bitmap.  (fragmentary — dropped lines)
 */
close_files_for_creating_dumpfile(void)
6139
if (info->max_dump_level > DL_EXCLUDE_ZERO)
6140
close_kernel_file();
6142
/* free name for vmcoreinfo */
6143
/* Name was heap-allocated only when vmcoreinfo came from /proc/vmcore. */
if (info->offset_vmcoreinfo && info->size_vmcoreinfo) {
6144
free(info->name_vmcoreinfo);
6145
info->name_vmcoreinfo = NULL;
6147
close_dump_memory();
6149
close_dump_bitmap();
6155
* for Xen extraction
6158
/*
 * get_symbol_info_xen - resolve the Xen hypervisor symbols needed for
 * domain filtering, with per-architecture fallbacks for the idle page
 * table name.  (fragmentary — dropped lines)
 */
get_symbol_info_xen(void)
6163
SYMBOL_INIT(dom_xen, "dom_xen");
6164
SYMBOL_INIT(dom_io, "dom_io");
6165
SYMBOL_INIT(domain_list, "domain_list");
6166
SYMBOL_INIT(frame_table, "frame_table");
6167
SYMBOL_INIT(alloc_bitmap, "alloc_bitmap");
6168
SYMBOL_INIT(max_page, "max_page");
6169
SYMBOL_INIT(xenheap_phys_end, "xenheap_phys_end");
6172
* Architecture specific
6174
SYMBOL_INIT(pgd_l2, "idle_pg_table_l2"); /* x86 */
6175
SYMBOL_INIT(pgd_l3, "idle_pg_table_l3"); /* x86-PAE */
6176
/* Older Xen used the unsuffixed name; fall back if needed. */
if (SYMBOL(pgd_l3) == NOT_FOUND_SYMBOL)
6177
SYMBOL_INIT(pgd_l3, "idle_pg_table"); /* x86-PAE */
6178
SYMBOL_INIT(pgd_l4, "idle_pg_table_4"); /* x86_64 */
6179
if (SYMBOL(pgd_l4) == NOT_FOUND_SYMBOL)
6180
SYMBOL_INIT(pgd_l4, "idle_pg_table"); /* x86_64 */
6182
SYMBOL_INIT(xen_heap_start, "xen_heap_start"); /* ia64 */
6183
SYMBOL_INIT(xen_pstart, "xen_pstart"); /* ia64 */
6184
SYMBOL_INIT(frametable_pg_dir, "frametable_pg_dir"); /* ia64 */
6190
/*
 * get_structure_info_xen - pull sizes/offsets of Xen's page_info and
 * domain structures from debuginfo.  (fragmentary — dropped lines)
 */
get_structure_info_xen(void)
6192
SIZE_INIT(page_info, "page_info");
6193
OFFSET_INIT(page_info.count_info, "page_info", "count_info");
6195
* _domain is the first member of union u
6197
/* So the offset of union 'u' doubles as the offset of _domain. */
OFFSET_INIT(page_info._domain, "page_info", "u");
6199
SIZE_INIT(domain, "domain");
6200
OFFSET_INIT(domain.domain_id, "domain", "domain_id");
6201
OFFSET_INIT(domain.next_in_list, "domain", "next_in_list");
6207
/*
 * get_xen_phys_start - read xen_phys_start out of the XEN_ELFNOTE crash
 * info block; its value is the second-to-last unsigned long of a V2+
 * crash-info record.  No-op if already set.  (fragmentary)
 */
get_xen_phys_start(void)
6210
unsigned long xen_phys_start;
6211
const off_t failed = (off_t)-1;
6213
if (info->xen_phys_start)
6216
if (info->size_xen_crash_info >= SIZE_XEN_CRASH_INFO_V2) {
6217
/* xen_phys_start sits two longs from the end of the record. */
offset = info->offset_xen_crash_info + info->size_xen_crash_info
6218
- sizeof(unsigned long) * 2;
6219
if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
6220
ERRMSG("Can't seek the dump memory(%s). %s\n",
6221
info->name_memory, strerror(errno));
6224
if (read(info->fd_memory, &xen_phys_start, sizeof(unsigned long))
6225
!= sizeof(unsigned long)) {
6226
ERRMSG("Can't read the dump memory(%s). %s\n",
6227
info->name_memory, strerror(errno));
6230
info->xen_phys_start = xen_phys_start;
6239
/* NOTE(review): function header dropped by extraction — from context
 * this is the body of get_xen_info(): read alloc_bitmap/max_page, walk
 * domain_list twice (count, then record addr+id), append dom_xen and
 * dom_io (hence num_domain + 2), then call get_xen_info_arch(). */
unsigned long domain;
6240
unsigned int domain_id;
6243
if (SYMBOL(alloc_bitmap) == NOT_FOUND_SYMBOL) {
6244
ERRMSG("Can't get the symbol of alloc_bitmap.\n");
6247
if (!readmem(VADDR_XEN, SYMBOL(alloc_bitmap), &info->alloc_bitmap,
6248
sizeof(info->alloc_bitmap))) {
6249
ERRMSG("Can't get the value of alloc_bitmap.\n");
6252
if (SYMBOL(max_page) == NOT_FOUND_SYMBOL) {
6253
ERRMSG("Can't get the symbol of max_page.\n");
6256
if (!readmem(VADDR_XEN, SYMBOL(max_page), &info->max_page,
6257
sizeof(info->max_page))) {
6258
ERRMSG("Can't get the value of max_page.\n");
6263
* Walk through domain_list
6265
if (SYMBOL(domain_list) == NOT_FOUND_SYMBOL) {
6266
ERRMSG("Can't get the symbol of domain_list.\n");
6269
if (!readmem(VADDR_XEN, SYMBOL(domain_list), &domain, sizeof(domain))){
6270
ERRMSG("Can't get the value of domain_list.\n");
6275
* Get numbers of domain first
6280
if (!readmem(VADDR_XEN, domain + OFFSET(domain.next_in_list),
6281
&domain, sizeof(domain))) {
6282
ERRMSG("Can't get through the domain_list.\n");
6287
/* +2 reserves slots for dom_xen and dom_io appended below. */
if ((info->domain_list = (struct domain_list *)
6288
malloc(sizeof(struct domain_list) * (num_domain + 2))) == NULL) {
6289
ERRMSG("Can't allcate memory for domain_list.\n");
6293
info->num_domain = num_domain + 2;
6295
/* Second pass: restart from the list head and record each domain. */
if (!readmem(VADDR_XEN, SYMBOL(domain_list), &domain, sizeof(domain))) {
6296
ERRMSG("Can't get the value of domain_list.\n");
6301
if (!readmem(VADDR_XEN, domain + OFFSET(domain.domain_id),
6302
&domain_id, sizeof(domain_id))) {
6303
ERRMSG("Can't get the domain_id.\n");
6306
info->domain_list[num_domain].domain_addr = domain;
6307
info->domain_list[num_domain].domain_id = domain_id;
6309
* pickled_id is set by architecture specific
6313
if (!readmem(VADDR_XEN, domain + OFFSET(domain.next_in_list),
6314
&domain, sizeof(domain))) {
6315
ERRMSG("Can't get through the domain_list.\n");
6323
/* Append the two special system domains. */
if (SYMBOL(dom_xen) == NOT_FOUND_SYMBOL) {
6324
ERRMSG("Can't get the symbol of dom_xen.\n");
6327
if (!readmem(VADDR_XEN, SYMBOL(dom_xen), &domain, sizeof(domain))) {
6328
ERRMSG("Can't get the value of dom_xen.\n");
6331
if (!readmem(VADDR_XEN, domain + OFFSET(domain.domain_id), &domain_id,
6332
sizeof(domain_id))) {
6333
ERRMSG( "Can't get the value of dom_xen domain_id.\n");
6336
info->domain_list[num_domain].domain_addr = domain;
6337
info->domain_list[num_domain].domain_id = domain_id;
6340
if (SYMBOL(dom_io) == NOT_FOUND_SYMBOL) {
6341
ERRMSG("Can't get the symbol of dom_io.\n");
6344
if (!readmem(VADDR_XEN, SYMBOL(dom_io), &domain, sizeof(domain))) {
6345
ERRMSG("Can't get the value of dom_io.\n");
6348
if (!readmem(VADDR_XEN, domain + OFFSET(domain.domain_id), &domain_id,
6349
sizeof(domain_id))) {
6350
ERRMSG( "Can't get the value of dom_io domain_id.\n");
6353
info->domain_list[num_domain].domain_addr = domain;
6354
info->domain_list[num_domain].domain_id = domain_id;
6357
* Get architecture specific data
6359
if (!get_xen_info_arch())
6371
* Show data for debug
6374
/* NOTE(review): enclosing function signature dropped by extraction —
 * this is a debug dump of all resolved Xen symbols, structure sizes/
 * offsets and derived values (emitted when debug messages are on). */
MSG("SYMBOL(dom_xen): %llx\n", SYMBOL(dom_xen));
6375
MSG("SYMBOL(dom_io): %llx\n", SYMBOL(dom_io));
6376
MSG("SYMBOL(domain_list): %llx\n", SYMBOL(domain_list));
6377
MSG("SYMBOL(xen_heap_start): %llx\n", SYMBOL(xen_heap_start));
6378
MSG("SYMBOL(frame_table): %llx\n", SYMBOL(frame_table));
6379
MSG("SYMBOL(alloc_bitmap): %llx\n", SYMBOL(alloc_bitmap));
6380
MSG("SYMBOL(max_page): %llx\n", SYMBOL(max_page));
6381
MSG("SYMBOL(pgd_l2): %llx\n", SYMBOL(pgd_l2));
6382
MSG("SYMBOL(pgd_l3): %llx\n", SYMBOL(pgd_l3));
6383
MSG("SYMBOL(pgd_l4): %llx\n", SYMBOL(pgd_l4));
6384
MSG("SYMBOL(xenheap_phys_end): %llx\n", SYMBOL(xenheap_phys_end));
6385
MSG("SYMBOL(xen_pstart): %llx\n", SYMBOL(xen_pstart));
6386
MSG("SYMBOL(frametable_pg_dir): %llx\n", SYMBOL(frametable_pg_dir));
6388
MSG("SIZE(page_info): %ld\n", SIZE(page_info));
6389
MSG("OFFSET(page_info.count_info): %ld\n", OFFSET(page_info.count_info));
6390
MSG("OFFSET(page_info._domain): %ld\n", OFFSET(page_info._domain));
6391
MSG("SIZE(domain): %ld\n", SIZE(domain));
6392
MSG("OFFSET(domain.domain_id): %ld\n", OFFSET(domain.domain_id));
6393
MSG("OFFSET(domain.next_in_list): %ld\n", OFFSET(domain.next_in_list));
6396
MSG("xen_phys_start: %lx\n", info->xen_phys_start);
6397
MSG("frame_table_vaddr: %lx\n", info->frame_table_vaddr);
6398
MSG("xen_heap_start: %lx\n", info->xen_heap_start);
6399
MSG("xen_heap_end:%lx\n", info->xen_heap_end);
6400
MSG("alloc_bitmap: %lx\n", info->alloc_bitmap);
6401
MSG("max_page: %lx\n", info->max_page);
6402
MSG("num_domain: %d\n", info->num_domain);
6403
for (i = 0; i < info->num_domain; i++) {
6404
MSG(" %u: %x: %lx\n", info->domain_list[i].domain_id,
6405
info->domain_list[i].pickled_id,
6406
info->domain_list[i].domain_addr);
6411
/*
 * generate_vmcoreinfo_xen - resolve Xen symbols/structures from the
 * xen-syms debuginfo and write them (plus the page size) to the
 * vmcoreinfo file in STR_* key/value format.  (fragmentary)
 */
generate_vmcoreinfo_xen(void)
6413
if ((info->page_size = sysconf(_SC_PAGE_SIZE)) <= 0) {
6414
ERRMSG("Can't get the size of page.\n");
6417
dwarf_info.fd_debuginfo = info->fd_xen_syms;
6418
dwarf_info.name_debuginfo = info->name_xen_syms;
6420
if (!get_symbol_info_xen())
6423
if (!get_structure_info_xen())
6427
* write 1st kernel's PAGESIZE
6429
fprintf(info->file_vmcoreinfo, "%s%ld\n", STR_PAGESIZE,
6433
* write the symbol of 1st kernel
6435
WRITE_SYMBOL("dom_xen", dom_xen);
6436
WRITE_SYMBOL("dom_io", dom_io);
6437
WRITE_SYMBOL("domain_list", domain_list);
6438
WRITE_SYMBOL("xen_heap_start", xen_heap_start);
6439
WRITE_SYMBOL("frame_table", frame_table);
6440
WRITE_SYMBOL("alloc_bitmap", alloc_bitmap);
6441
WRITE_SYMBOL("max_page", max_page);
6442
WRITE_SYMBOL("pgd_l2", pgd_l2);
6443
WRITE_SYMBOL("pgd_l3", pgd_l3);
6444
WRITE_SYMBOL("pgd_l4", pgd_l4);
6445
WRITE_SYMBOL("xenheap_phys_end", xenheap_phys_end);
6446
WRITE_SYMBOL("xen_pstart", xen_pstart);
6447
WRITE_SYMBOL("frametable_pg_dir", frametable_pg_dir);
6450
* write the structure size of 1st kernel
6452
WRITE_STRUCTURE_SIZE("page_info", page_info);
6453
WRITE_STRUCTURE_SIZE("domain", domain);
6456
* write the member offset of 1st kernel
6458
WRITE_MEMBER_OFFSET("page_info.count_info", page_info.count_info);
6459
WRITE_MEMBER_OFFSET("page_info._domain", page_info._domain);
6460
WRITE_MEMBER_OFFSET("domain.domain_id", domain.domain_id);
6461
WRITE_MEMBER_OFFSET("domain.next_in_list", domain.next_in_list);
6467
/*
 * read_vmcoreinfo_basic_info_xen - scan the vmcoreinfo file for the
 * STR_PAGESIZE line, parse it with strtol and install it via
 * set_page_size().  Fails if no valid page size was found.
 * (fragmentary — dropped lines)
 */
read_vmcoreinfo_basic_info_xen(void)
6469
long page_size = FALSE;
6470
char buf[BUFSIZE_FGETS], *endp;
6473
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
6474
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
6475
info->name_vmcoreinfo, strerror(errno));
6479
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
6483
/* Strip the trailing newline before parsing. */
if (buf[i - 1] == '\n')
6485
if (strncmp(buf, STR_PAGESIZE, strlen(STR_PAGESIZE)) == 0) {
6486
page_size = strtol(buf+strlen(STR_PAGESIZE),&endp,10);
6487
/* Reject zero, overflowed, or trailing-garbage values. */
if ((!page_size || page_size == LONG_MAX)
6488
|| strlen(endp) != 0) {
6489
ERRMSG("Invalid data in %s: %s",
6490
info->name_vmcoreinfo, buf);
6493
if (!set_page_size(page_size)) {
6494
ERRMSG("Invalid data in %s: %s",
6495
info->name_vmcoreinfo, buf);
6501
if (!info->page_size) {
6502
ERRMSG("Invalid format in %s", info->name_vmcoreinfo);
6509
/*
 * read_vmcoreinfo_xen - load all Xen symbols, structure sizes and
 * member offsets from a previously generated vmcoreinfo file (the
 * read-side counterpart of generate_vmcoreinfo_xen).  (fragmentary)
 */
read_vmcoreinfo_xen(void)
6511
if (!read_vmcoreinfo_basic_info_xen())
6514
READ_SYMBOL("dom_xen", dom_xen);
6515
READ_SYMBOL("dom_io", dom_io);
6516
READ_SYMBOL("domain_list", domain_list);
6517
READ_SYMBOL("xen_heap_start", xen_heap_start);
6518
READ_SYMBOL("frame_table", frame_table);
6519
READ_SYMBOL("alloc_bitmap", alloc_bitmap);
6520
READ_SYMBOL("max_page", max_page);
6521
READ_SYMBOL("pgd_l2", pgd_l2);
6522
READ_SYMBOL("pgd_l3", pgd_l3);
6523
READ_SYMBOL("pgd_l4", pgd_l4);
6524
READ_SYMBOL("xenheap_phys_end", xenheap_phys_end);
6525
READ_SYMBOL("xen_pstart", xen_pstart);
6526
READ_SYMBOL("frametable_pg_dir", frametable_pg_dir);
6528
READ_STRUCTURE_SIZE("page_info", page_info);
6529
READ_STRUCTURE_SIZE("domain", domain);
6531
READ_MEMBER_OFFSET("page_info.count_info", page_info.count_info);
6532
READ_MEMBER_OFFSET("page_info._domain", page_info._domain);
6533
READ_MEMBER_OFFSET("domain.domain_id", domain.domain_id);
6534
READ_MEMBER_OFFSET("domain.next_in_list", domain.next_in_list);
6540
/*
 * allocated_in_map - test pfn's bit in Xen's allocator bitmap.
 * Caches the last-read bitmap word (static cur_idx/cur_word) so
 * consecutive pfns in the same word need only one readmem.
 * NOTE(review): the static cache makes this non-reentrant.
 */
allocated_in_map(unsigned long long pfn)
6542
static unsigned long long cur_idx = -1;
6543
static unsigned long cur_word;
6544
unsigned long long idx;
6546
idx = pfn / PAGES_PER_MAPWORD;
6547
if (idx != cur_idx) {
6548
if (!readmem(VADDR_XEN,
6549
info->alloc_bitmap + idx * sizeof(unsigned long),
6550
&cur_word, sizeof(cur_word))) {
6551
ERRMSG("Can't access alloc_bitmap.\n");
6557
return !!(cur_word & (1UL << (pfn & (PAGES_PER_MAPWORD - 1))));
6561
/*
 * is_select_domain - return whether pickled id 'id' belongs to the
 * domain selected for inclusion (currently hard-wired to dom0).
 */
is_select_domain(unsigned int id)
6565
/* selected domain is fix to dom0 only now !!
6566
(yes... domain_list is not necessary right now,
6567
it can get from "dom0" directly) */
6569
for (i = 0; i < info->num_domain; i++) {
6570
if (info->domain_list[i].domain_id == 0 &&
6571
info->domain_list[i].pickled_id == id)
6579
/*
 * exclude_xen_user_domain - clear 2nd-bitmap bits for pages that do not
 * belong to the selected (dom0) domain, the Xen heap, or anonymous
 * ownership, walking every PT_LOAD segment's pfn range.  (fragmentary)
 */
exclude_xen_user_domain(void)
6582
unsigned int count_info, _domain;
6583
unsigned long page_info_addr;
6584
unsigned long long pfn, pfn_end;
6585
unsigned long long j, size;
6586
struct pt_load_segment *pls;
6589
* NOTE: the first half of bitmap is not used for Xen extraction
6591
for (i = 0; i < info->num_load_memory; i++) {
6593
print_progress(PROGRESS_XEN_DOMAIN, i, info->num_load_memory);
6595
pls = &info->pt_load_segments[i];
6596
pfn = paddr_to_pfn(pls->phys_start);
6597
pfn_end = paddr_to_pfn(pls->phys_end);
6598
size = pfn_end - pfn;
6600
for (j = 0; pfn < pfn_end; pfn++, j++) {
6601
print_progress(PROGRESS_XEN_DOMAIN, j + (size * i),
6602
size * info->num_load_memory);
6604
/* Page never handed out by the allocator: exclude it. */
if (!allocated_in_map(pfn)) {
6605
clear_bit_on_2nd_bitmap(pfn);
6609
page_info_addr = info->frame_table_vaddr + pfn * SIZE(page_info);
6610
if (!readmem(VADDR_XEN,
6611
page_info_addr + OFFSET(page_info.count_info),
6612
&count_info, sizeof(count_info))) {
6613
/* Unreadable page_info is treated as excludable, not fatal. */
clear_bit_on_2nd_bitmap(pfn);
6614
continue; /* page_info may not exist */
6616
if (!readmem(VADDR_XEN,
6617
page_info_addr + OFFSET(page_info._domain),
6618
&_domain, sizeof(_domain))) {
6619
ERRMSG("Can't get page_info._domain.\n");
6624
* - anonymous (_domain == 0), or
6625
* - xen heap area, or
6626
* - selected domain page
6630
if (info->xen_heap_start <= pfn && pfn < info->xen_heap_end)
6632
if ((count_info & 0xffff) && is_select_domain(_domain))
6634
clear_bit_on_2nd_bitmap(pfn);
6641
print_progress(PROGRESS_XEN_DOMAIN, info->num_load_memory, info->num_load_memory);
6651
/* NOTE(review): function header dropped by extraction — from context
 * this is the body of initial_xen(): validate options for Xen mode
 * (ELF output required, dump level 0/1 only), then obtain debug info
 * from vmcoreinfo file, xen-syms, or /proc/vmcore, and initialize
 * Xen-specific state. */
MSG("ppc64 xen is not supported.\n");
6654
if(!info->flag_elf_dumpfile) {
6655
MSG("Specify '-E' option for Xen.\n");
6656
MSG("Commandline parameter is invalid.\n");
6657
MSG("Try `makedumpfile --help' for more information.\n");
6661
if (DL_EXCLUDE_ZERO < info->max_dump_level) {
6662
MSG("Dump_level is invalid. It should be 0 or 1.\n");
6663
MSG("Commandline parameter is invalid.\n");
6664
MSG("Try `makedumpfile --help' for more information.\n");
6668
if (!fallback_to_current_page_size())
6671
* Get the debug information for analysis from the vmcoreinfo file
6673
if (info->flag_read_vmcoreinfo) {
6674
if (!read_vmcoreinfo_xen())
6678
* Get the debug information for analysis from the xen-syms file
6680
} else if (info->name_xen_syms) {
6681
dwarf_info.fd_debuginfo = info->fd_xen_syms;
6682
dwarf_info.name_debuginfo = info->name_xen_syms;
6684
if (!get_symbol_info_xen())
6686
if (!get_structure_info_xen())
6689
* Get the debug information for analysis from /proc/vmcore
6693
* Check whether /proc/vmcore contains vmcoreinfo,
6694
* and get both the offset and the size.
6696
if (!info->offset_vmcoreinfo_xen || !info->size_vmcoreinfo_xen){
6697
if (!info->flag_exclude_xen_dom)
6700
MSG("%s doesn't contain a vmcoreinfo for Xen.\n",
6702
MSG("Specify '--xen-syms' option or '--xen-vmcoreinfo' option.\n");
6703
MSG("Commandline parameter is invalid.\n");
6704
MSG("Try `makedumpfile --help' for more information.\n");
6708
* Get the debug information from /proc/vmcore
6710
if (!read_vmcoreinfo_from_vmcore(info->offset_vmcoreinfo_xen,
6711
info->size_vmcoreinfo_xen, TRUE))
6714
if (!get_xen_phys_start())
6716
if (!get_xen_info())
6719
if (message_level & ML_PRINT_DEBUG_MSG)
6722
if (!get_max_mapnr())
6732
/* NOTE(review): function header dropped by extraction — this is the
 * body of the --vtop handler: translate info->vaddr_for_vtop to a
 * physical address, print the pair, then clear the request. */
unsigned long long paddr;
6734
if (!info->vaddr_for_vtop)
6738
MSG("Translating virtual address %lx to physical address.\n", info->vaddr_for_vtop);
6740
paddr = vaddr_to_paddr(info->vaddr_for_vtop);
6742
MSG("VIRTUAL PHYSICAL\n");
6743
MSG("%16lx %llx\n", info->vaddr_for_vtop, paddr);
6746
/* Clear so the translation is done at most once. */
info->vaddr_for_vtop = 0;
6754
/* NOTE(review): function header dropped by extraction — this is the
 * body of the page-exclusion report: totals per exclusion category
 * and the shrinking percentage.
 * NOTE(review): 'shrinking / pfn_original' divides by zero if every
 * page is a memory hole — confirm upstream guards this. */
unsigned long long pfn_original, pfn_excluded, shrinking;
6757
* /proc/vmcore doesn't contain the memory hole area.
6759
pfn_original = info->max_mapnr - pfn_memhole;
6761
pfn_excluded = pfn_zero + pfn_cache + pfn_cache_private
6762
+ pfn_user + pfn_free;
6763
/* Percentage of pages remaining after exclusion. */
shrinking = (pfn_original - pfn_excluded) * 100;
6764
shrinking = shrinking / pfn_original;
6766
REPORT_MSG("Original pages : 0x%016llx\n", pfn_original);
6767
REPORT_MSG(" Excluded pages : 0x%016llx\n", pfn_excluded);
6768
REPORT_MSG(" Pages filled with zero : 0x%016llx\n", pfn_zero);
6769
REPORT_MSG(" Cache pages : 0x%016llx\n", pfn_cache);
6770
REPORT_MSG(" Cache pages + private : 0x%016llx\n",
6772
REPORT_MSG(" User process data pages : 0x%016llx\n", pfn_user);
6773
REPORT_MSG(" Free pages : 0x%016llx\n", pfn_free);
6774
REPORT_MSG(" Remaining pages : 0x%016llx\n",
6775
pfn_original - pfn_excluded);
6776
REPORT_MSG(" (The number of pages is reduced to %lld%%.)\n",
6778
REPORT_MSG("Memory Hole : 0x%016llx\n", pfn_memhole);
6779
REPORT_MSG("--------------------------------------------------\n");
6780
REPORT_MSG("Total pages : 0x%016llx\n", info->max_mapnr);
6785
/*
 * writeout_dumpfile - produce one dumpfile: open output, write flat/
 * ELF/kdump headers as configured, write pages and (kdump) bitmap,
 * then release the cache buffers.  Returns NOSPACE distinctly when
 * the failure was lack of disk space.  (fragmentary)
 */
writeout_dumpfile(void)
6788
struct cache_data cd_header, cd_page;
6790
info->flag_nospace = FALSE;
6792
if (!open_dump_file())
6795
if (info->flag_flatten) {
6796
if (!write_start_flat_header())
6799
if (!prepare_cache_data(&cd_header))
6802
if (!prepare_cache_data(&cd_page)) {
6803
free_cache_data(&cd_header);
6806
if (info->flag_elf_dumpfile) {
6807
if (!write_elf_header(&cd_header))
6809
if (!write_elf_pages(&cd_header, &cd_page))
6812
if (!write_kdump_header())
6814
if (!write_kdump_pages(&cd_header, &cd_page))
6816
if (!write_kdump_bitmap())
6819
if (info->flag_flatten) {
6820
if (!write_end_flat_header())
6826
free_cache_data(&cd_header);
6827
free_cache_data(&cd_page);
6831
/* Distinguish "out of space" from other failures for the caller. */
if ((ret == FALSE) && info->flag_nospace)
6838
/*
 * setup_splitting - partition the dumpable pfns evenly across the
 * requested number of split dumpfiles: each file gets
 * num_dumpable/num_dumpfile dumpable pages; the last file absorbs the
 * remainder up to max_mapnr.  (fragmentary)
 */
setup_splitting(void)
6841
unsigned long long j, pfn_per_dumpfile;
6842
unsigned long long start_pfn, end_pfn;
6843
unsigned long long num_dumpable = get_num_dumpable();
6844
struct dump_bitmap bitmap2;
6846
if (info->num_dumpfile <= 1)
6849
initialize_2nd_bitmap(&bitmap2);
6851
pfn_per_dumpfile = num_dumpable / info->num_dumpfile;
6852
start_pfn = end_pfn = 0;
6853
for (i = 0; i < info->num_dumpfile; i++) {
6854
start_pfn = end_pfn;
6855
if (i == (info->num_dumpfile - 1)) {
6856
/* Last file takes everything that remains. */
end_pfn = info->max_mapnr;
6858
/* Advance end_pfn until this file holds its share of dumpable pages. */
for (j = 0; j < pfn_per_dumpfile; end_pfn++) {
6859
if (is_dumpable(&bitmap2, end_pfn))
6863
SPLITTING_START_PFN(i) = start_pfn;
6864
SPLITTING_END_PFN(i) = end_pfn;
6871
* This function is for creating split dumpfiles by multiple
6872
* processes. Each child process should re-open a /proc/vmcore
6873
* file, because it prevents each other from affectting the file
6874
* offset due to read(2) call.
6877
/*
 * reopen_dump_memory - close and reopen /proc/vmcore; used per child
 * process in split mode so read(2) offsets are independent (see the
 * comment above).
 */
reopen_dump_memory()
6879
close_dump_memory();
6881
if ((info->fd_memory = open(info->name_memory, O_RDONLY)) < 0) {
6882
ERRMSG("Can't open the dump memory(%s). %s\n",
6883
info->name_memory, strerror(errno));
6890
/*
 * get_next_dump_level - return the index-th configured dump level for
 * the NOSPACE retry loop; out-of-range index signals "no more levels".
 */
get_next_dump_level(int index)
6892
if (info->num_dump_level <= index)
6895
return info->array_dump_level[index];
6899
/*
 * delete_dumpfile - unlink the output file(s) (all split parts, or the
 * single dumpfile); nothing to delete in flatten (stdout) mode.
 */
delete_dumpfile(void)
6903
if (info->flag_flatten)
6906
if (info->flag_split) {
6907
for (i = 0; i < info->num_dumpfile; i++)
6908
unlink(SPLITTING_DUMPFILE(i));
6910
unlink(info->name_dumpfile);
6916
/*
 * writeout_multiple_dumpfiles - fork one child per split dumpfile;
 * each child takes its pfn range/fd/name from SPLITTING_*(i), reopens
 * /proc/vmcore, and runs writeout_dumpfile().  Parent reaps children
 * and folds their exit codes into ret (exit code 2 = NOSPACE).
 * (fragmentary — dropped lines)
 */
writeout_multiple_dumpfiles(void)
6918
int i, status, ret = TRUE;
6920
/* VLA: one pid per split dumpfile. */
pid_t array_pid[info->num_dumpfile];
6922
if (!setup_splitting())
6925
for (i = 0; i < info->num_dumpfile; i++) {
6926
if ((pid = fork()) < 0) {
6929
} else if (pid == 0) { /* Child */
6930
info->name_dumpfile = SPLITTING_DUMPFILE(i);
6931
info->fd_bitmap = SPLITTING_FD_BITMAP(i);
6932
info->split_start_pfn = SPLITTING_START_PFN(i);
6933
info->split_end_pfn = SPLITTING_END_PFN(i);
6935
if (!reopen_dump_memory())
6937
if ((status = writeout_dumpfile()) == FALSE)
6939
else if (status == NOSPACE)
6945
for (i = 0; i < info->num_dumpfile; i++) {
6946
waitpid(array_pid[i], &status, WUNTRACED);
6947
if (!WIFEXITED(status) || WEXITSTATUS(status) == 1) {
6948
ERRMSG("Child process(%d) finished imcompletely.(%d)\n",
6949
array_pid[i], status);
6951
} else if ((ret == TRUE) && (WEXITSTATUS(status) == 2))
6958
/*
 * create_dumpfile - top-level driver: open files, gather ELF/Xen info,
 * merge the refiltering dump level, build the bitmap, write out (split
 * or single), and on NOSPACE retry with the next configured dump level
 * after deleting the partial output.  (fragmentary)
 */
create_dumpfile(void)
6960
int num_retry, status, new_level;
6962
if (!open_files_for_creating_dumpfile())
6965
if (!info->flag_refiltering) {
6966
if (!get_elf_info())
6969
if (vt.mem_flags & MEMORY_XEN) {
6980
if (info->flag_refiltering) {
6981
/* Change dump level */
6982
/* A refiltered input already lost pages of its original level;
 * OR it in so we never promise more than the input contains. */
new_level = info->dump_level | info->kh_memory->dump_level;
6983
if (new_level != info->dump_level) {
6984
info->dump_level = new_level;
6985
MSG("dump_level is changed to %d, " \
6986
"because %s was created by dump_level(%d).",
6987
new_level, info->name_memory,
6988
info->kh_memory->dump_level);
6992
if (!create_dump_bitmap())
6995
if (info->flag_split) {
6996
if ((status = writeout_multiple_dumpfiles()) == FALSE)
6999
if ((status = writeout_dumpfile()) == FALSE)
7002
if (status == NOSPACE) {
7004
* If specifying the other dump_level, makedumpfile tries
7005
* to create a dumpfile with it again.
7008
if ((info->dump_level = get_next_dump_level(num_retry)) < 0)
7010
MSG("Retry to create a dumpfile by dump_level(%d).\n",
7012
if (!delete_dumpfile())
7018
if (!close_files_for_creating_dumpfile())
7025
/*
 * __read_disk_dump_header - read the disk_dump_header from the start
 * of 'filename' into *dh; no signature validation here (that is the
 * caller's job).  (fragmentary — dropped lines)
 */
__read_disk_dump_header(struct disk_dump_header *dh, char *filename)
7027
int fd, ret = FALSE;
7029
if ((fd = open(filename, O_RDONLY)) < 0) {
7030
ERRMSG("Can't open a file(%s). %s\n",
7031
filename, strerror(errno));
7034
if (lseek(fd, 0x0, SEEK_SET) < 0) {
7035
ERRMSG("Can't seek a file(%s). %s\n",
7036
filename, strerror(errno));
7039
if (read(fd, dh, sizeof(struct disk_dump_header))
7040
!= sizeof(struct disk_dump_header)) {
7041
ERRMSG("Can't read a file(%s). %s\n",
7042
filename, strerror(errno));
7053
/*
 * read_disk_dump_header - like __read_disk_dump_header() but also
 * verifies the KDUMP_SIGNATURE magic.
 */
read_disk_dump_header(struct disk_dump_header *dh, char *filename)
7055
if (!__read_disk_dump_header(dh, filename))
7058
if (strncmp(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE))) {
7059
ERRMSG("%s is not the kdump-compressed format.\n",
7067
/*
 * read_kdump_sub_header - read the kdump_sub_header that follows the
 * disk_dump_header blocks; the disk header is read first to learn the
 * block size that fixes the sub-header's offset.  (fragmentary)
 */
read_kdump_sub_header(struct kdump_sub_header *kh, char *filename)
7069
int fd, ret = FALSE;
7070
struct disk_dump_header dh;
7073
if (!read_disk_dump_header(&dh, filename))
7076
offset = DISKDUMP_HEADER_BLOCKS * dh.block_size;
7078
if ((fd = open(filename, O_RDONLY)) < 0) {
7079
ERRMSG("Can't open a file(%s). %s\n",
7080
filename, strerror(errno));
7083
if (lseek(fd, offset, SEEK_SET) < 0) {
7084
ERRMSG("Can't seek a file(%s). %s\n",
7085
filename, strerror(errno));
7088
if (read(fd, kh, sizeof(struct kdump_sub_header))
7089
!= sizeof(struct kdump_sub_header)) {
7090
ERRMSG("Can't read a file(%s). %s\n",
7091
filename, strerror(errno));
7102
/*
 * store_splitting_info - read disk/sub headers of every split part,
 * check they all came from the same /proc/vmcore (identical disk
 * headers), and record each part's pfn range.  (fragmentary)
 */
store_splitting_info(void)
7105
struct disk_dump_header dh, tmp_dh;
7106
struct kdump_sub_header kh;
7108
for (i = 0; i < info->num_dumpfile; i++) {
7109
if (!read_disk_dump_header(&tmp_dh, SPLITTING_DUMPFILE(i)))
7113
/* First file establishes the reference header and geometry. */
memcpy(&dh, &tmp_dh, sizeof(tmp_dh));
7114
info->max_mapnr = dh.max_mapnr;
7115
if (!set_page_size(dh.block_size))
7117
DEBUG_MSG("max_mapnr : %llx\n", info->max_mapnr);
7118
DEBUG_MSG("page_size : %ld\n", info->page_size);
7122
* Check whether multiple dumpfiles are parts of
7123
* the same /proc/vmcore.
7125
if (memcmp(&dh, &tmp_dh, sizeof(tmp_dh))) {
7126
ERRMSG("Invalid dumpfile(%s).\n",
7127
SPLITTING_DUMPFILE(i));
7130
if (!read_kdump_sub_header(&kh, SPLITTING_DUMPFILE(i)))
7134
info->dump_level = kh.dump_level;
7135
DEBUG_MSG("dump_level : %d\n", info->dump_level);
7137
SPLITTING_START_PFN(i) = kh.start_pfn;
7138
SPLITTING_END_PFN(i) = kh.end_pfn;
7144
/*
 * sort_splitting_info - selection-sort style ordering of the split
 * parts by start_pfn (swap-based, O(n^2) over a handful of files),
 * then dump the ordered table when debug output is enabled.
 * (fragmentary — dropped lines)
 */
sort_splitting_info(void)
7147
unsigned long long start_pfn, end_pfn;
7148
char *name_dumpfile;
7151
* Sort splitting_info by start_pfn.
7153
for (i = 0; i < (info->num_dumpfile - 1); i++) {
7154
for (j = i; j < info->num_dumpfile; j++) {
7155
if (SPLITTING_START_PFN(i) < SPLITTING_START_PFN(j))
7157
/* Swap entry i and entry j (range and file name together). */
start_pfn = SPLITTING_START_PFN(i);
7158
end_pfn = SPLITTING_END_PFN(i);
7159
name_dumpfile = SPLITTING_DUMPFILE(i);
7161
SPLITTING_START_PFN(i) = SPLITTING_START_PFN(j);
7162
SPLITTING_END_PFN(i) = SPLITTING_END_PFN(j);
7163
SPLITTING_DUMPFILE(i) = SPLITTING_DUMPFILE(j);
7165
SPLITTING_START_PFN(j) = start_pfn;
7166
SPLITTING_END_PFN(j) = end_pfn;
7167
SPLITTING_DUMPFILE(j) = name_dumpfile;
7171
DEBUG_MSG("num_dumpfile : %d\n", info->num_dumpfile);
7172
for (i = 0; i < info->num_dumpfile; i++) {
7173
DEBUG_MSG("dumpfile (%s)\n", SPLITTING_DUMPFILE(i));
7174
DEBUG_MSG(" start_pfn : %llx\n", SPLITTING_START_PFN(i));
7175
DEBUG_MSG(" end_pfn : %llx\n", SPLITTING_END_PFN(i));
7180
/*
 * check_splitting_info - after sorting, verify the parts' pfn ranges
 * tile [0, max_mapnr) with no gap: first starts at 0, each part starts
 * where the previous ended, last ends at max_mapnr.  (fragmentary)
 */
check_splitting_info(void)
7183
unsigned long long end_pfn;
7186
* Check whether there are not lack of /proc/vmcore.
7188
if (SPLITTING_START_PFN(0) != 0) {
7189
ERRMSG("There is not dumpfile corresponding to pfn 0x%x - 0x%llx.\n",
7190
0x0, SPLITTING_START_PFN(0));
7193
end_pfn = SPLITTING_END_PFN(0);
7195
for (i = 1; i < info->num_dumpfile; i++) {
7196
if (end_pfn != SPLITTING_START_PFN(i)) {
7197
ERRMSG("There is not dumpfile corresponding to pfn 0x%llx - 0x%llx.\n",
7198
end_pfn, SPLITTING_START_PFN(i));
7201
end_pfn = SPLITTING_END_PFN(i);
7203
if (end_pfn != info->max_mapnr) {
7204
ERRMSG("There is not dumpfile corresponding to pfn 0x%llx - 0x%llx.\n",
7205
end_pfn, info->max_mapnr);
7213
/* get_splitting_info - store, sort, then validate split-part ranges. */
get_splitting_info(void)
7215
if (!store_splitting_info())
7218
sort_splitting_info();
7220
if (!check_splitting_info())
7227
/*
 * reassemble_kdump_header - when merging split dumpfiles: copy the
 * disk_dump_header, kdump_sub_header and dump bitmap from the first
 * split part into the reassembled dumpfile (and the bitmap also into
 * the working bitmap file).  (fragmentary — dropped lines)
 */
reassemble_kdump_header(void)
7229
int fd, ret = FALSE;
7230
off_t offset_bitmap;
7231
struct disk_dump_header dh;
7232
struct kdump_sub_header kh;
7236
* Write common header.
7238
if (!read_disk_dump_header(&dh, SPLITTING_DUMPFILE(0)))
7241
if (lseek(info->fd_dumpfile, 0x0, SEEK_SET) < 0) {
7242
ERRMSG("Can't seek a file(%s). %s\n",
7243
info->name_dumpfile, strerror(errno));
7246
if (write(info->fd_dumpfile, &dh, sizeof(dh)) != sizeof(dh)) {
7247
ERRMSG("Can't write a file(%s). %s\n",
7248
info->name_dumpfile, strerror(errno));
7255
if (!read_kdump_sub_header(&kh, SPLITTING_DUMPFILE(0)))
7262
/* Sub-header starts at the second block (one page in). */
if (lseek(info->fd_dumpfile, info->page_size, SEEK_SET) < 0) {
7263
ERRMSG("Can't seek a file(%s). %s\n",
7264
info->name_dumpfile, strerror(errno));
7267
if (write(info->fd_dumpfile, &kh, sizeof(kh)) != sizeof(kh)) {
7268
ERRMSG("Can't write a file(%s). %s\n",
7269
info->name_dumpfile, strerror(errno));
7274
* Write dump bitmap to both a dumpfile and a bitmap file.
7277
= (DISKDUMP_HEADER_BLOCKS + dh.sub_hdr_size) * dh.block_size;
7278
info->len_bitmap = dh.bitmap_blocks * dh.block_size;
7279
if ((buf_bitmap = malloc(info->len_bitmap)) == NULL) {
7280
ERRMSG("Can't allcate memory for bitmap.\n");
7284
if ((fd = open(SPLITTING_DUMPFILE(0), O_RDONLY)) < 0) {
7285
ERRMSG("Can't open a file(%s). %s\n",
7286
SPLITTING_DUMPFILE(0), strerror(errno));
7290
if (lseek(fd, offset_bitmap, SEEK_SET) < 0) {
7291
ERRMSG("Can't seek a file(%s). %s\n",
7292
SPLITTING_DUMPFILE(0), strerror(errno));
7295
if (read(fd, buf_bitmap, info->len_bitmap) != info->len_bitmap) {
7296
ERRMSG("Can't read a file(%s). %s\n",
7297
SPLITTING_DUMPFILE(0), strerror(errno));
7301
if (lseek(info->fd_dumpfile, offset_bitmap, SEEK_SET) < 0) {
7302
ERRMSG("Can't seek a file(%s). %s\n",
7303
info->name_dumpfile, strerror(errno));
7306
if (write(info->fd_dumpfile, buf_bitmap, info->len_bitmap)
7307
!= info->len_bitmap) {
7308
ERRMSG("Can't write a file(%s). %s\n",
7309
info->name_dumpfile, strerror(errno));
7313
if (lseek(info->fd_bitmap, 0x0, SEEK_SET) < 0) {
7314
ERRMSG("Can't seek a file(%s). %s\n",
7315
info->name_bitmap, strerror(errno));
7318
if (write(info->fd_bitmap, buf_bitmap, info->len_bitmap)
7319
!= info->len_bitmap) {
7320
ERRMSG("Can't write a file(%s). %s\n",
7321
info->name_bitmap, strerror(errno));
7333
reassemble_kdump_pages(void)
7335
int i, fd = 0, ret = FALSE;
7336
off_t offset_first_ph, offset_ph_org;
7337
off_t offset_data_new, offset_zero_page = 0;
7338
unsigned long long pfn, start_pfn, end_pfn;
7339
unsigned long long num_dumpable, num_dumped;
7340
struct dump_bitmap bitmap2;
7341
struct disk_dump_header dh;
7342
struct page_desc pd, pd_zero;
7343
struct cache_data cd_pd, cd_data;
7346
initialize_2nd_bitmap(&bitmap2);
7348
if (!read_disk_dump_header(&dh, SPLITTING_DUMPFILE(0)))
7351
if (!prepare_cache_data(&cd_pd))
7354
if (!prepare_cache_data(&cd_data)) {
7355
free_cache_data(&cd_pd);
7358
if ((data = malloc(info->page_size)) == NULL) {
7359
ERRMSG("Can't allcate memory for page data.\n");
7360
free_cache_data(&cd_pd);
7361
free_cache_data(&cd_data);
7364
num_dumpable = get_num_dumpable();
7368
= (DISKDUMP_HEADER_BLOCKS + dh.sub_hdr_size + dh.bitmap_blocks)
7370
cd_pd.offset = offset_first_ph;
7371
offset_data_new = offset_first_ph + sizeof(page_desc_t) * num_dumpable;
7372
cd_data.offset = offset_data_new;
7375
* Write page header of zero-filled page.
7377
if (info->dump_level & DL_EXCLUDE_ZERO) {
7379
* makedumpfile outputs the data of zero-filled page at first
7380
* if excluding zero-filled page, so the offset of first data
7381
* is for zero-filled page in all dumpfiles.
7383
offset_zero_page = offset_data_new;
7385
pd_zero.size = info->page_size;
7387
pd_zero.offset = offset_data_new;
7388
pd_zero.page_flags = 0;
7389
memset(data, 0, pd_zero.size);
7390
if (!write_cache(&cd_data, data, pd_zero.size))
7392
offset_data_new += pd_zero.size;
7394
for (i = 0; i < info->num_dumpfile; i++) {
7395
if ((fd = open(SPLITTING_DUMPFILE(i), O_RDONLY)) < 0) {
7396
ERRMSG("Can't open a file(%s). %s\n",
7397
SPLITTING_DUMPFILE(i), strerror(errno));
7400
start_pfn = SPLITTING_START_PFN(i);
7401
end_pfn = SPLITTING_END_PFN(i);
7403
offset_ph_org = offset_first_ph;
7404
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
7405
if (!is_dumpable(&bitmap2, pfn))
7410
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
7412
if (lseek(fd, offset_ph_org, SEEK_SET) < 0) {
7413
ERRMSG("Can't seek a file(%s). %s\n",
7414
SPLITTING_DUMPFILE(i), strerror(errno));
7417
if (read(fd, &pd, sizeof(pd)) != sizeof(pd)) {
7418
ERRMSG("Can't read a file(%s). %s\n",
7419
SPLITTING_DUMPFILE(i), strerror(errno));
7422
if (lseek(fd, pd.offset, SEEK_SET) < 0) {
7423
ERRMSG("Can't seek a file(%s). %s\n",
7424
SPLITTING_DUMPFILE(i), strerror(errno));
7427
if (read(fd, data, pd.size) != pd.size) {
7428
ERRMSG("Can't read a file(%s). %s\n",
7429
SPLITTING_DUMPFILE(i), strerror(errno));
7432
if ((info->dump_level & DL_EXCLUDE_ZERO)
7433
&& (pd.offset == offset_zero_page)) {
7435
* Handle the data of zero-filled page.
7437
if (!write_cache(&cd_pd, &pd_zero,
7440
offset_ph_org += sizeof(pd);
7443
pd.offset = offset_data_new;
7444
if (!write_cache(&cd_pd, &pd, sizeof(pd)))
7446
offset_ph_org += sizeof(pd);
7448
if (!write_cache(&cd_data, data, pd.size))
7451
offset_data_new += pd.size;
7456
if (!write_cache_bufsz(&cd_pd))
7458
if (!write_cache_bufsz(&cd_data))
7461
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
7464
free_cache_data(&cd_pd);
7465
free_cache_data(&cd_data);
7476
reassemble_dumpfile(void)
7478
if (!get_splitting_info())
7481
if (!open_dump_bitmap())
7484
if (!open_dump_file())
7487
if (!reassemble_kdump_header())
7490
if (!reassemble_kdump_pages())
7494
close_dump_bitmap();
7500
check_param_for_generating_vmcoreinfo(int argc, char *argv[])
7505
if (info->flag_compress || info->dump_level
7506
|| info->flag_elf_dumpfile || info->flag_read_vmcoreinfo
7507
|| info->flag_flatten || info->flag_rearrange
7508
|| info->flag_exclude_xen_dom
7509
|| (!info->name_vmlinux && !info->name_xen_syms))
7517
* Parameters for creating dumpfile from the dump data
7518
* of flattened format by rearranging the dump data.
7521
check_param_for_rearranging_dumpdata(int argc, char *argv[])
7523
if (argc != optind + 1)
7526
if (info->flag_compress || info->dump_level
7527
|| info->flag_elf_dumpfile || info->flag_read_vmcoreinfo
7528
|| info->name_vmlinux || info->name_xen_syms
7529
|| info->flag_flatten || info->flag_generate_vmcoreinfo
7530
|| info->flag_exclude_xen_dom)
7533
info->name_dumpfile = argv[optind];
7538
* Parameters for reassembling multiple dumpfiles into one dumpfile.
7541
check_param_for_reassembling_dumpfile(int argc, char *argv[])
7545
info->num_dumpfile = argc - optind - 1;
7546
info->name_dumpfile = argv[argc - 1];
7548
DEBUG_MSG("num_dumpfile : %d\n", info->num_dumpfile);
7550
if (info->flag_compress || info->dump_level
7551
|| info->flag_elf_dumpfile || info->flag_read_vmcoreinfo
7552
|| info->name_vmlinux || info->name_xen_syms
7553
|| info->flag_flatten || info->flag_generate_vmcoreinfo
7554
|| info->flag_exclude_xen_dom || info->flag_split)
7557
if ((info->splitting_info
7558
= malloc(sizeof(splitting_info_t) * info->num_dumpfile))
7560
MSG("Can't allocate memory for splitting_info.\n");
7563
for (i = 0; i < info->num_dumpfile; i++)
7564
SPLITTING_DUMPFILE(i) = argv[optind + i];
7570
* Check parameters to create the dump file.
7573
check_param_for_creating_dumpfile(int argc, char *argv[])
7577
if (info->flag_generate_vmcoreinfo || info->flag_rearrange)
7580
if ((message_level < MIN_MSG_LEVEL)
7581
|| (MAX_MSG_LEVEL < message_level)) {
7582
message_level = DEFAULT_MSG_LEVEL;
7583
MSG("Message_level is invalid.\n");
7586
if ((info->flag_compress && info->flag_elf_dumpfile)
7587
|| (info->flag_read_vmcoreinfo && info->name_vmlinux)
7588
|| (info->flag_read_vmcoreinfo && info->name_xen_syms))
7591
if (info->flag_flatten && info->flag_split)
7594
if ((argc == optind + 2) && !info->flag_flatten
7595
&& !info->flag_split) {
7597
* Parameters for creating the dumpfile from vmcore.
7599
info->name_memory = argv[optind];
7600
info->name_dumpfile = argv[optind+1];
7602
} else if ((argc > optind + 2) && info->flag_split) {
7604
* Parameters for creating multiple dumpfiles from vmcore.
7606
info->num_dumpfile = argc - optind - 1;
7607
info->name_memory = argv[optind];
7609
if (info->flag_elf_dumpfile) {
7610
MSG("Options for splitting dumpfile cannot be used with Elf format.\n");
7613
if ((info->splitting_info
7614
= malloc(sizeof(splitting_info_t) * info->num_dumpfile))
7616
MSG("Can't allocate memory for splitting_info.\n");
7619
for (i = 0; i < info->num_dumpfile; i++)
7620
SPLITTING_DUMPFILE(i) = argv[optind + 1 + i];
7622
} else if ((argc == optind + 1) && info->flag_flatten) {
7624
* Parameters for outputting the dump data of the
7625
* flattened format to STDOUT.
7627
info->name_memory = argv[optind];
7636
parse_dump_level(char *str_dump_level)
7641
if (!(buf = strdup(str_dump_level))) {
7642
MSG("Can't duplicate strings(%s).\n", str_dump_level);
7645
info->max_dump_level = 0;
7646
info->num_dump_level = 0;
7649
ptr = strtok(ptr, ",");
7654
if ((i < MIN_DUMP_LEVEL) || (MAX_DUMP_LEVEL < i)) {
7655
MSG("Dump_level(%d) is invalid.\n", i);
7658
if (NUM_ARRAY_DUMP_LEVEL <= info->num_dump_level) {
7659
MSG("Dump_level is invalid.\n");
7662
if (info->max_dump_level < i)
7663
info->max_dump_level = i;
7664
if (info->num_dump_level == 0)
7665
info->dump_level = i;
7666
info->array_dump_level[info->num_dump_level] = i;
7667
info->num_dump_level++;
7677
static struct option longopts[] = {
7678
{"split", no_argument, NULL, 's'},
7679
{"reassemble", no_argument, NULL, 'r'},
7680
{"xen-syms", required_argument, NULL, 'y'},
7681
{"xen-vmcoreinfo", required_argument, NULL, 'z'},
7682
{"xen_phys_start", required_argument, NULL, 'P'},
7683
{"message-level", required_argument, NULL, 'm'},
7684
{"vtop", required_argument, NULL, 'V'},
7685
{"dump-dmesg", no_argument, NULL, 'M'},
7686
{"help", no_argument, NULL, 'h'},
7691
main(int argc, char *argv[])
7693
int i, opt, flag_debug = FALSE;
7695
if ((info = calloc(1, sizeof(struct DumpInfo))) == NULL) {
7696
ERRMSG("Can't allocate memory for the pagedesc cache. %s.\n",
7700
if ((info->dump_header = calloc(1, sizeof(struct disk_dump_header)))
7702
ERRMSG("Can't allocate memory for the dump header. %s\n",
7706
initialize_tables();
7708
info->block_order = DEFAULT_ORDER;
7709
message_level = DEFAULT_MSG_LEVEL;
7710
while ((opt = getopt_long(argc, argv, "b:cDd:EFfg:hi:MRrsVvXx:", longopts,
7714
info->block_order = atoi(optarg);
7717
info->flag_compress = 1;
7723
if (!parse_dump_level(optarg))
7727
info->flag_elf_dumpfile = 1;
7730
info->flag_flatten = 1;
7733
info->flag_force = 1;
7736
info->flag_generate_vmcoreinfo = 1;
7737
info->name_vmcoreinfo = optarg;
7740
info->flag_show_usage = 1;
7743
info->flag_read_vmcoreinfo = 1;
7744
info->name_vmcoreinfo = optarg;
7747
message_level = atoi(optarg);
7750
info->flag_dmesg = 1;
7753
info->xen_phys_start = strtoul(optarg, NULL, 0);
7756
info->flag_rearrange = 1;
7759
info->flag_split = 1;
7762
info->flag_reassemble = 1;
7765
info->vaddr_for_vtop = strtoul(optarg, NULL, 0);
7768
info->flag_show_version = 1;
7771
info->flag_exclude_xen_dom = 1;
7774
info->name_vmlinux = optarg;
7777
info->name_xen_syms = optarg;
7780
info->flag_read_vmcoreinfo = 1;
7781
info->name_vmcoreinfo = optarg;
7784
MSG("Commandline parameter is invalid.\n");
7785
MSG("Try `makedumpfile --help' for more information.\n");
7790
message_level |= ML_PRINT_DEBUG_MSG;
7792
if (info->flag_show_usage) {
7796
if (info->flag_show_version) {
7801
if (elf_version(EV_CURRENT) == EV_NONE ) {
7803
* library out of date
7805
ERRMSG("Elf library out of date!\n");
7808
if (info->flag_generate_vmcoreinfo) {
7809
if (!check_param_for_generating_vmcoreinfo(argc, argv)) {
7810
MSG("Commandline parameter is invalid.\n");
7811
MSG("Try `makedumpfile --help' for more information.\n");
7814
if (!open_files_for_generating_vmcoreinfo())
7817
if (info->name_xen_syms) {
7818
if (!generate_vmcoreinfo_xen())
7821
if (!generate_vmcoreinfo())
7825
if (!close_files_for_generating_vmcoreinfo())
7829
MSG("The vmcoreinfo is saved to %s.\n", info->name_vmcoreinfo);
7831
} else if (info->flag_rearrange) {
7832
if (!check_param_for_rearranging_dumpdata(argc, argv)) {
7833
MSG("Commandline parameter is invalid.\n");
7834
MSG("Try `makedumpfile --help' for more information.\n");
7837
if (!open_files_for_rearranging_dumpdata())
7840
if (!rearrange_dumpdata())
7843
if (!close_files_for_rearranging_dumpdata())
7847
MSG("The dumpfile is saved to %s.\n", info->name_dumpfile);
7848
} else if (info->flag_reassemble) {
7849
if (!check_param_for_reassembling_dumpfile(argc, argv)) {
7850
MSG("Commandline parameter is invalid.\n");
7851
MSG("Try `makedumpfile --help' for more information.\n");
7854
if (!reassemble_dumpfile())
7858
MSG("The dumpfile is saved to %s.\n", info->name_dumpfile);
7859
} else if (info->flag_dmesg) {
7860
if (!check_param_for_creating_dumpfile(argc, argv)) {
7861
MSG("Commandline parameter is invalid.\n");
7862
MSG("Try `makedumpfile --help' for more information.\n");
7869
MSG("The dmesg log is saved to %s.\n", info->name_dumpfile);
7871
if (!check_param_for_creating_dumpfile(argc, argv)) {
7872
MSG("Commandline parameter is invalid.\n");
7873
MSG("Try `makedumpfile --help' for more information.\n");
7876
if (!create_dumpfile())
7880
if (info->flag_split) {
7881
MSG("The dumpfiles are saved to ");
7882
for (i = 0; i < info->num_dumpfile; i++) {
7883
if (i != (info->num_dumpfile - 1))
7884
MSG("%s, ", SPLITTING_DUMPFILE(i));
7886
MSG("and %s.\n", SPLITTING_DUMPFILE(i));
7889
MSG("The dumpfile is saved to %s.\n", info->name_dumpfile);
7895
if (retcd == COMPLETED)
7896
MSG("makedumpfile Completed.\n");
7898
MSG("makedumpfile Failed.\n");
7901
if (info->dh_memory)
7902
free(info->dh_memory);
7903
if (info->kh_memory)
7904
free(info->kh_memory);
7905
if (info->valid_pages)
7906
free(info->valid_pages);
7907
if (info->bitmap_memory)
7908
free(info->bitmap_memory);
7909
if (info->fd_memory)
7910
close(info->fd_memory);
7911
if (info->fd_dumpfile)
7912
close(info->fd_dumpfile);
7913
if (info->fd_bitmap)
7914
close(info->fd_bitmap);
7915
if (info->pt_load_segments != NULL)
7916
free(info->pt_load_segments);
7917
if (vt.node_online_map != NULL)
7918
free(vt.node_online_map);
7919
if (info->mem_map_data != NULL)
7920
free(info->mem_map_data);
7921
if (info->dump_header != NULL)
7922
free(info->dump_header);
7923
if (info->splitting_info != NULL)
7924
free(info->splitting_info);
7925
if (info->p2m_mfn_frame_list != NULL)
7926
free(info->p2m_mfn_frame_list);