/*
 * Copyright (C) 2006, 2007, 2008, 2009, 2011  NEC Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "makedumpfile.h"
17
#include "print_info.h"
18
#include "dwarf_info.h"
20
#include "erase_info.h"
21
#include "sadump_info.h"
25
struct symbol_table symbol_table;
26
struct size_table size_table;
27
struct offset_table offset_table;
28
struct array_table array_table;
29
struct number_table number_table;
30
struct srcfile_table srcfile_table;
32
struct vm_table vt = { 0 };
33
struct DumpInfo *info = NULL;
35
char filename_stdout[] = FILENAME_STDOUT;
38
* The numbers of the excluded pages
40
unsigned long long pfn_zero;
41
unsigned long long pfn_memhole;
42
unsigned long long pfn_cache;
43
unsigned long long pfn_cache_private;
44
unsigned long long pfn_user;
45
unsigned long long pfn_free;
47
unsigned long long num_dumped;
49
int retcd = FAILED; /* return code */
51
/*
 * Fill every long-sized slot of `table` with `value`.
 * NOTE(review): relies on locals i, size_member, num_member and
 * ptr_long_table declared in the enclosing function.  Wrapped in
 * do { } while (0) so the macro behaves as a single statement in
 * any control-flow context.
 */
#define INITIALIZE_LONG_TABLE(table, value) \
do { \
	size_member = sizeof(long); \
	num_member = sizeof(table) / size_member; \
	ptr_long_table = (long *)&table; \
	for (i = 0; i < num_member; i++, ptr_long_table++) \
		*ptr_long_table = value; \
} while (0)
60
static void check_cyclic_buffer_overrun(void);
61
static void setup_page_is_buddy(void);
64
initialize_tables(void)
66
int i, size_member, num_member;
67
unsigned long long *ptr_symtable;
71
* Initialize the symbol table.
73
size_member = sizeof(symbol_table.mem_map);
74
num_member = sizeof(symbol_table) / size_member;
76
ptr_symtable = (unsigned long long *)&symbol_table;
78
for (i = 0; i < num_member; i++, ptr_symtable++)
79
*ptr_symtable = NOT_FOUND_SYMBOL;
81
INITIALIZE_LONG_TABLE(size_table, NOT_FOUND_STRUCTURE);
82
INITIALIZE_LONG_TABLE(offset_table, NOT_FOUND_STRUCTURE);
83
INITIALIZE_LONG_TABLE(array_table, NOT_FOUND_STRUCTURE);
84
INITIALIZE_LONG_TABLE(number_table, NOT_FOUND_NUMBER);
88
* Translate a domain-0's physical address to machine address.
91
ptom_xen(unsigned long long paddr)
94
unsigned long long maddr, pfn, mfn_idx, frame_idx;
96
pfn = paddr_to_pfn(paddr);
97
mfn_idx = pfn / MFNS_PER_FRAME;
98
frame_idx = pfn % MFNS_PER_FRAME;
100
if (mfn_idx >= info->p2m_frames) {
101
ERRMSG("Invalid mfn_idx(%llu).\n", mfn_idx);
104
maddr = pfn_to_paddr(info->p2m_mfn_frame_list[mfn_idx])
105
+ sizeof(unsigned long) * frame_idx;
106
if (!readmem(MADDR_XEN, maddr, &mfn, sizeof(mfn))) {
107
ERRMSG("Can't get mfn.\n");
110
maddr = pfn_to_paddr(mfn);
111
maddr |= PAGEOFFSET(paddr);
117
* Get the number of the page descriptors from the ELF info.
122
unsigned long long max_paddr;
124
if (info->flag_refiltering) {
125
info->max_mapnr = info->dh_memory->max_mapnr;
129
if (info->flag_sadump) {
130
info->max_mapnr = sadump_get_max_mapnr();
134
max_paddr = get_max_paddr();
135
info->max_mapnr = paddr_to_pfn(max_paddr);
138
DEBUG_MSG("max_mapnr : %llx\n", info->max_mapnr);
144
* Get the number of the page descriptors for Xen.
149
unsigned long max_pfn;
151
if (SYMBOL(max_pfn) != NOT_FOUND_SYMBOL) {
152
if (!readmem(VADDR, SYMBOL(max_pfn), &max_pfn, sizeof max_pfn)) {
153
ERRMSG("Can't read domain-0 max_pfn.\n");
157
info->dom0_mapnr = max_pfn;
158
DEBUG_MSG("domain-0 pfn : %llx\n", info->dom0_mapnr);
165
is_in_same_page(unsigned long vaddr1, unsigned long vaddr2)
167
if (round(vaddr1, info->page_size) == round(vaddr2, info->page_size))
173
#define BITMAP_SECT_LEN 4096
174
static inline int is_dumpable(struct dump_bitmap *, unsigned long long);
175
static inline int is_dumpable_cyclic(char *bitmap, unsigned long long);
177
pfn_to_pos(unsigned long long pfn)
179
unsigned long desc_pos, i;
181
desc_pos = info->valid_pages[pfn / BITMAP_SECT_LEN];
182
for (i = round(pfn, BITMAP_SECT_LEN); i < pfn; i++)
183
if (is_dumpable(info->bitmap_memory, i))
190
read_page_desc(unsigned long long paddr, page_desc_t *pd)
192
struct disk_dump_header *dh;
193
unsigned long desc_pos;
194
unsigned long long pfn;
198
* Find page descriptor
200
dh = info->dh_memory;
202
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size + dh->bitmap_blocks)
204
pfn = paddr_to_pfn(paddr);
205
desc_pos = pfn_to_pos(pfn);
206
offset += (off_t)desc_pos * sizeof(page_desc_t);
207
if (lseek(info->fd_memory, offset, SEEK_SET) < 0) {
208
ERRMSG("Can't seek %s. %s\n",
209
info->name_memory, strerror(errno));
214
* Read page descriptor
216
if (read(info->fd_memory, pd, sizeof(*pd)) != sizeof(*pd)) {
217
ERRMSG("Can't read %s. %s\n",
218
info->name_memory, strerror(errno));
225
if (pd->size > dh->block_size)
232
readpmem_kdump_compressed(unsigned long long paddr, void *bufptr, size_t size)
235
char buf[info->page_size];
236
char buf2[info->page_size];
238
unsigned long retlen, page_offset;
240
page_offset = paddr % info->page_size;
242
if (!is_dumpable(info->bitmap_memory, paddr_to_pfn(paddr))) {
243
ERRMSG("pfn(%llx) is excluded from %s.\n",
244
paddr_to_pfn(paddr), info->name_memory);
248
if (!read_page_desc(paddr, &pd)) {
249
ERRMSG("Can't read page_desc: %llx\n", paddr);
253
if (lseek(info->fd_memory, pd.offset, SEEK_SET) < 0) {
254
ERRMSG("Can't seek %s. %s\n",
255
info->name_memory, strerror(errno));
262
if (read(info->fd_memory, buf, pd.size) != pd.size) {
263
ERRMSG("Can't read %s. %s\n",
264
info->name_memory, strerror(errno));
268
if (pd.flags & DUMP_DH_COMPRESSED_ZLIB) {
269
retlen = info->page_size;
270
ret = uncompress((unsigned char *)buf2, &retlen,
271
(unsigned char *)buf, pd.size);
272
if ((ret != Z_OK) || (retlen != info->page_size)) {
273
ERRMSG("Uncompress failed: %d\n", ret);
276
memcpy(bufptr, buf2 + page_offset, size);
278
} else if (info->flag_lzo_support
279
&& (pd.flags & DUMP_DH_COMPRESSED_LZO)) {
280
retlen = info->page_size;
281
ret = lzo1x_decompress_safe((unsigned char *)buf, pd.size,
282
(unsigned char *)buf2, &retlen,
283
LZO1X_MEM_DECOMPRESS);
284
if ((ret != LZO_E_OK) || (retlen != info->page_size)) {
285
ERRMSG("Uncompress failed: %d\n", ret);
288
memcpy(bufptr, buf2 + page_offset, size);
291
} else if ((pd.flags & DUMP_DH_COMPRESSED_SNAPPY)) {
293
ret = snappy_uncompressed_length(buf, pd.size, &retlen);
294
if (ret != SNAPPY_OK) {
295
ERRMSG("Uncompress failed: %d\n", ret);
299
ret = snappy_uncompress(buf, pd.size, buf2, &retlen);
300
if ((ret != SNAPPY_OK) || (retlen != info->page_size)) {
301
ERRMSG("Uncompress failed: %d\n", ret);
304
memcpy(bufptr, buf2 + page_offset, size);
307
memcpy(bufptr, buf + page_offset, size);
311
ERRMSG("type_addr: %d, addr:%llx, size:%zd\n", PADDR, paddr, size);
316
readmem(int type_addr, unsigned long long addr, void *bufptr, size_t size)
318
size_t read_size, next_size;
320
unsigned long long next_addr;
321
unsigned long long paddr, maddr = NOT_PADDR;
323
const off_t failed = (off_t)-1;
327
if ((paddr = vaddr_to_paddr(addr)) == NOT_PADDR) {
328
ERRMSG("Can't convert a virtual address(%llx) to physical address.\n",
332
if (is_xen_memory()) {
333
if ((maddr = ptom_xen(paddr)) == NOT_PADDR) {
334
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
343
if (is_xen_memory()) {
344
if ((maddr = ptom_xen(paddr)) == NOT_PADDR) {
345
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
353
if ((paddr = kvtop_xen(addr)) == NOT_PADDR) {
354
ERRMSG("Can't convert a virtual address(%llx) to machine address.\n",
363
ERRMSG("Invalid address type (%d).\n", type_addr);
370
* Read each page, because pages are not necessarily continuous.
371
* Ex) pages in vmalloc area
373
if (!is_in_same_page(addr, addr + size - 1)) {
374
read_size = info->page_size - (addr % info->page_size);
375
next_addr = roundup(addr + 1, info->page_size);
376
next_size = size - read_size;
377
next_ptr = (char *)bufptr + read_size;
379
if (!readmem(type_addr, next_addr, next_ptr, next_size))
383
if (info->flag_refiltering)
384
return readpmem_kdump_compressed(paddr, bufptr, read_size);
386
if (info->flag_sadump)
387
return readpmem_sadump(paddr, bufptr, read_size);
389
if (!(offset = paddr_to_offset(paddr))) {
390
ERRMSG("Can't convert a physical address(%llx) to offset.\n",
395
if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
396
ERRMSG("Can't seek the dump memory(%s). (offset: %llx) %s\n",
397
info->name_memory, (unsigned long long)offset, strerror(errno));
401
if (read(info->fd_memory, bufptr, read_size) != read_size) {
402
ERRMSG("Can't read the dump memory(%s). %s\n",
403
info->name_memory, strerror(errno));
409
ERRMSG("type_addr: %d, addr:%llx, size:%zd\n", type_addr, addr, size);
414
get_kernel_version(char *release)
421
* This method checks that vmlinux and vmcore are same kernel version.
424
maj = strtol(start, &end, 10);
429
min = strtol(start, &end, 10);
434
rel = strtol(start, &end, 10);
438
version = KERNEL_VERSION(maj, min, rel);
440
if ((version < OLDEST_VERSION) || (LATEST_VERSION < version)) {
441
MSG("The kernel version is not supported.\n");
442
MSG("The created dumpfile may be incomplete.\n");
448
is_page_size(long page_size)
451
* Page size is restricted to a hamming weight of 1.
453
if (page_size > 0 && !(page_size & (page_size - 1)))
460
set_page_size(long page_size)
462
if (!is_page_size(page_size)) {
463
ERRMSG("Invalid page_size: %ld", page_size);
466
info->page_size = page_size;
467
info->page_shift = ffs(info->page_size) - 1;
468
DEBUG_MSG("page_size : %ld\n", info->page_size);
474
fallback_to_current_page_size(void)
477
if (!set_page_size(sysconf(_SC_PAGE_SIZE)))
480
DEBUG_MSG("WARNING: Cannot determine page size (no vmcoreinfo).\n");
481
DEBUG_MSG("Using the dump kernel page size: %ld\n",
490
unsigned long utsname;
493
* Get the kernel version.
495
if (SYMBOL(system_utsname) != NOT_FOUND_SYMBOL) {
496
utsname = SYMBOL(system_utsname);
497
} else if (SYMBOL(init_uts_ns) != NOT_FOUND_SYMBOL) {
498
utsname = SYMBOL(init_uts_ns) + sizeof(int);
500
ERRMSG("Can't get the symbol of system_utsname.\n");
503
if (!readmem(VADDR, utsname, &info->system_utsname,
504
sizeof(struct utsname))) {
505
ERRMSG("Can't get the address of system_utsname.\n");
509
if (info->flag_read_vmcoreinfo) {
510
if (strcmp(info->system_utsname.release, info->release)) {
511
ERRMSG("%s and %s don't match.\n",
512
info->name_vmcoreinfo, info->name_memory);
513
retcd = WRONG_RELEASE;
518
info->kernel_version = get_kernel_version(info->system_utsname.release);
519
if (info->kernel_version == FALSE) {
520
ERRMSG("Can't get the kernel version.\n");
528
open_vmcoreinfo(char *mode)
530
FILE *file_vmcoreinfo;
532
if ((file_vmcoreinfo = fopen(info->name_vmcoreinfo, mode)) == NULL) {
533
ERRMSG("Can't open the vmcoreinfo file(%s). %s\n",
534
info->name_vmcoreinfo, strerror(errno));
537
info->file_vmcoreinfo = file_vmcoreinfo;
542
open_kernel_file(void)
546
if (info->name_vmlinux) {
547
if ((fd = open(info->name_vmlinux, O_RDONLY)) < 0) {
548
ERRMSG("Can't open the kernel file(%s). %s\n",
549
info->name_vmlinux, strerror(errno));
552
info->fd_vmlinux = fd;
554
if (info->name_xen_syms) {
555
if ((fd = open(info->name_xen_syms, O_RDONLY)) < 0) {
556
ERRMSG("Can't open the kernel file(%s). %s\n",
557
info->name_xen_syms, strerror(errno));
560
info->fd_xen_syms = fd;
566
check_kdump_compressed(char *filename)
568
struct disk_dump_header dh;
570
if (!__read_disk_dump_header(&dh, filename))
573
if (strncmp(dh.signature, KDUMP_SIGNATURE, SIG_LEN))
580
get_kdump_compressed_header_info(char *filename)
582
struct disk_dump_header dh;
583
struct kdump_sub_header kh;
585
if (!read_disk_dump_header(&dh, filename))
588
if (!read_kdump_sub_header(&kh, filename))
591
if (dh.header_version < 1) {
592
ERRMSG("header does not have dump_level member\n");
595
DEBUG_MSG("diskdump main header\n");
596
DEBUG_MSG(" signature : %s\n", dh.signature);
597
DEBUG_MSG(" header_version : %d\n", dh.header_version);
598
DEBUG_MSG(" status : %d\n", dh.status);
599
DEBUG_MSG(" block_size : %d\n", dh.block_size);
600
DEBUG_MSG(" sub_hdr_size : %d\n", dh.sub_hdr_size);
601
DEBUG_MSG(" bitmap_blocks : %d\n", dh.bitmap_blocks);
602
DEBUG_MSG(" max_mapnr : 0x%x\n", dh.max_mapnr);
603
DEBUG_MSG(" total_ram_blocks : %d\n", dh.total_ram_blocks);
604
DEBUG_MSG(" device_blocks : %d\n", dh.device_blocks);
605
DEBUG_MSG(" written_blocks : %d\n", dh.written_blocks);
606
DEBUG_MSG(" current_cpu : %d\n", dh.current_cpu);
607
DEBUG_MSG(" nr_cpus : %d\n", dh.nr_cpus);
608
DEBUG_MSG("kdump sub header\n");
609
DEBUG_MSG(" phys_base : 0x%lx\n", kh.phys_base);
610
DEBUG_MSG(" dump_level : %d\n", kh.dump_level);
611
DEBUG_MSG(" split : %d\n", kh.split);
612
DEBUG_MSG(" start_pfn : 0x%lx\n", kh.start_pfn);
613
DEBUG_MSG(" end_pfn : 0x%lx\n", kh.end_pfn);
615
info->dh_memory = malloc(sizeof(dh));
616
if (info->dh_memory == NULL) {
617
ERRMSG("Can't allocate memory for the header. %s\n",
621
memcpy(info->dh_memory, &dh, sizeof(dh));
622
memcpy(&info->timestamp, &dh.timestamp, sizeof(dh.timestamp));
624
info->kh_memory = malloc(sizeof(kh));
625
if (info->kh_memory == NULL) {
626
ERRMSG("Can't allocate memory for the sub header. %s\n",
630
memcpy(info->kh_memory, &kh, sizeof(kh));
631
set_nr_cpus(dh.nr_cpus);
633
if (dh.header_version >= 3) {
634
/* A dumpfile contains vmcoreinfo data. */
635
set_vmcoreinfo(kh.offset_vmcoreinfo, kh.size_vmcoreinfo);
636
DEBUG_MSG(" offset_vmcoreinfo: 0x%llx\n",
637
(unsigned long long)kh.offset_vmcoreinfo);
638
DEBUG_MSG(" size_vmcoreinfo : 0x%ld\n", kh.size_vmcoreinfo);
640
if (dh.header_version >= 4) {
641
/* A dumpfile contains ELF note section. */
642
set_pt_note(kh.offset_note, kh.size_note);
643
DEBUG_MSG(" offset_note : 0x%llx\n",
644
(unsigned long long)kh.offset_note);
645
DEBUG_MSG(" size_note : 0x%ld\n", kh.size_note);
647
if (dh.header_version >= 5) {
648
/* A dumpfile contains erased information. */
649
set_eraseinfo(kh.offset_eraseinfo, kh.size_eraseinfo);
650
DEBUG_MSG(" offset_eraseinfo : 0x%llx\n",
651
(unsigned long long)kh.offset_eraseinfo);
652
DEBUG_MSG(" size_eraseinfo : 0x%ld\n", kh.size_eraseinfo);
656
free(info->dh_memory);
657
info->dh_memory = NULL;
663
open_dump_memory(void)
667
if ((fd = open(info->name_memory, O_RDONLY)) < 0) {
668
ERRMSG("Can't open the dump memory(%s). %s\n",
669
info->name_memory, strerror(errno));
672
info->fd_memory = fd;
674
status = check_kdump_compressed(info->name_memory);
675
if (status == TRUE) {
676
info->flag_refiltering = TRUE;
677
return get_kdump_compressed_header_info(info->name_memory);
680
status = check_and_get_sadump_header_info(info->name_memory);
694
int open_flags = O_RDWR|O_CREAT|O_TRUNC;
696
if (!info->flag_force)
697
open_flags |= O_EXCL;
699
if (info->flag_flatten) {
701
info->name_dumpfile = filename_stdout;
702
} else if ((fd = open(info->name_dumpfile, open_flags,
703
S_IRUSR|S_IWUSR)) < 0) {
704
ERRMSG("Can't open the dump file(%s). %s\n",
705
info->name_dumpfile, strerror(errno));
708
info->fd_dumpfile = fd;
713
open_dump_bitmap(void)
718
tmpname = getenv("TMPDIR");
722
if ((info->name_bitmap = (char *)malloc(sizeof(FILENAME_BITMAP) +
723
strlen(tmpname) + 1)) == NULL) {
724
ERRMSG("Can't allocate memory for the filename. %s\n",
728
strcpy(info->name_bitmap, tmpname);
729
strcat(info->name_bitmap, "/");
730
strcat(info->name_bitmap, FILENAME_BITMAP);
731
if ((fd = mkstemp(info->name_bitmap)) < 0) {
732
ERRMSG("Can't open the bitmap file(%s). %s\n",
733
info->name_bitmap, strerror(errno));
736
info->fd_bitmap = fd;
738
if (info->flag_split) {
740
* Reserve file descriptors of bitmap for creating split
741
* dumpfiles by multiple processes, because a bitmap file will
742
* be unlinked just after this and it is not possible to open
743
* a bitmap file later.
745
for (i = 0; i < info->num_dumpfile; i++) {
746
if ((fd = open(info->name_bitmap, O_RDONLY)) < 0) {
747
ERRMSG("Can't open the bitmap file(%s). %s\n",
748
info->name_bitmap, strerror(errno));
751
SPLITTING_FD_BITMAP(i) = fd;
754
unlink(info->name_bitmap);
760
* Open the following files when it generates the vmcoreinfo file.
765
open_files_for_generating_vmcoreinfo(void)
767
if (!open_kernel_file())
770
if (!open_vmcoreinfo("w"))
777
* Open the following file when it rearranges the dump data.
781
open_files_for_rearranging_dumpdata(void)
783
if (!open_dump_file())
790
* Open the following files when it creates the dump file.
794
* if it reads the vmcoreinfo file
800
open_files_for_creating_dumpfile(void)
802
if (info->flag_read_vmcoreinfo) {
803
if (!open_vmcoreinfo("r"))
806
if (!open_kernel_file())
809
if (!open_dump_memory())
812
if (!open_dump_bitmap())
819
is_kvaddr(unsigned long long addr)
821
return (addr >= (unsigned long long)(KVBASE));
825
get_symbol_info(void)
830
SYMBOL_INIT(mem_map, "mem_map");
831
SYMBOL_INIT(vmem_map, "vmem_map");
832
SYMBOL_INIT(mem_section, "mem_section");
833
SYMBOL_INIT(pkmap_count, "pkmap_count");
834
SYMBOL_INIT_NEXT(pkmap_count_next, "pkmap_count");
835
SYMBOL_INIT(system_utsname, "system_utsname");
836
SYMBOL_INIT(init_uts_ns, "init_uts_ns");
837
SYMBOL_INIT(_stext, "_stext");
838
SYMBOL_INIT(swapper_pg_dir, "swapper_pg_dir");
839
SYMBOL_INIT(init_level4_pgt, "init_level4_pgt");
840
SYMBOL_INIT(vmlist, "vmlist");
841
SYMBOL_INIT(phys_base, "phys_base");
842
SYMBOL_INIT(node_online_map, "node_online_map");
843
SYMBOL_INIT(node_states, "node_states");
844
SYMBOL_INIT(node_memblk, "node_memblk");
845
SYMBOL_INIT(node_data, "node_data");
846
SYMBOL_INIT(pgdat_list, "pgdat_list");
847
SYMBOL_INIT(contig_page_data, "contig_page_data");
848
SYMBOL_INIT(log_buf, "log_buf");
849
SYMBOL_INIT(log_buf_len, "log_buf_len");
850
SYMBOL_INIT(log_end, "log_end");
851
SYMBOL_INIT(max_pfn, "max_pfn");
852
SYMBOL_INIT(modules, "modules");
853
SYMBOL_INIT(high_memory, "high_memory");
854
SYMBOL_INIT(linux_banner, "linux_banner");
855
SYMBOL_INIT(bios_cpu_apicid, "bios_cpu_apicid");
856
SYMBOL_INIT(x86_bios_cpu_apicid, "x86_bios_cpu_apicid");
857
if (SYMBOL(x86_bios_cpu_apicid) == NOT_FOUND_SYMBOL)
858
SYMBOL_INIT(x86_bios_cpu_apicid,
859
"per_cpu__x86_bios_cpu_apicid");
860
SYMBOL_INIT(x86_bios_cpu_apicid_early_ptr,
861
"x86_bios_cpu_apicid_early_ptr");
862
SYMBOL_INIT(x86_bios_cpu_apicid_early_map,
863
"x86_bios_cpu_apicid_early_map");
864
SYMBOL_INIT(crash_notes, "crash_notes");
865
SYMBOL_INIT(__per_cpu_load, "__per_cpu_load");
866
SYMBOL_INIT(__per_cpu_offset, "__per_cpu_offset");
867
SYMBOL_INIT(cpu_online_mask, "cpu_online_mask");
868
if (SYMBOL(cpu_online_mask) == NOT_FOUND_SYMBOL)
869
SYMBOL_INIT(cpu_online_mask, "cpu_online_map");
870
SYMBOL_INIT(kexec_crash_image, "kexec_crash_image");
871
SYMBOL_INIT(node_remap_start_vaddr, "node_remap_start_vaddr");
872
SYMBOL_INIT(node_remap_end_vaddr, "node_remap_end_vaddr");
873
SYMBOL_INIT(node_remap_start_pfn, "node_remap_start_pfn");
875
if (SYMBOL(node_data) != NOT_FOUND_SYMBOL)
876
SYMBOL_ARRAY_TYPE_INIT(node_data, "node_data");
877
if (SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
878
SYMBOL_ARRAY_LENGTH_INIT(pgdat_list, "pgdat_list");
879
if (SYMBOL(mem_section) != NOT_FOUND_SYMBOL)
880
SYMBOL_ARRAY_LENGTH_INIT(mem_section, "mem_section");
881
if (SYMBOL(node_memblk) != NOT_FOUND_SYMBOL)
882
SYMBOL_ARRAY_LENGTH_INIT(node_memblk, "node_memblk");
883
if (SYMBOL(__per_cpu_offset) != NOT_FOUND_SYMBOL)
884
SYMBOL_ARRAY_LENGTH_INIT(__per_cpu_offset, "__per_cpu_offset");
885
if (SYMBOL(node_remap_start_pfn) != NOT_FOUND_SYMBOL)
886
SYMBOL_ARRAY_LENGTH_INIT(node_remap_start_pfn,
887
"node_remap_start_pfn");
893
get_structure_info(void)
896
* Get offsets of the page_discriptor's members.
898
SIZE_INIT(page, "page");
899
OFFSET_INIT(page.flags, "page", "flags");
900
OFFSET_INIT(page._count, "page", "_count");
901
OFFSET_INIT(page.mapping, "page", "mapping");
902
OFFSET_INIT(page._mapcount, "page", "_mapcount");
903
OFFSET_INIT(page.private, "page", "private");
906
* Some vmlinux(s) don't have debugging information about
907
* page.mapping. Then, makedumpfile assumes that there is
908
* "mapping" next to "private(unsigned long)" in the first
911
if (OFFSET(page.mapping) == NOT_FOUND_STRUCTURE) {
912
OFFSET(page.mapping) = get_member_offset("page", NULL,
913
DWARF_INFO_GET_MEMBER_OFFSET_1ST_UNION);
914
if (OFFSET(page.mapping) == FAILED_DWARFINFO)
916
if (OFFSET(page.mapping) != NOT_FOUND_STRUCTURE)
917
OFFSET(page.mapping) += sizeof(unsigned long);
920
OFFSET_INIT(page.lru, "page", "lru");
923
* Get offsets of the mem_section's members.
925
SIZE_INIT(mem_section, "mem_section");
926
OFFSET_INIT(mem_section.section_mem_map, "mem_section",
930
* Get offsets of the pglist_data's members.
932
SIZE_INIT(pglist_data, "pglist_data");
933
OFFSET_INIT(pglist_data.node_zones, "pglist_data", "node_zones");
934
OFFSET_INIT(pglist_data.nr_zones, "pglist_data", "nr_zones");
935
OFFSET_INIT(pglist_data.node_mem_map, "pglist_data", "node_mem_map");
936
OFFSET_INIT(pglist_data.node_start_pfn, "pglist_data","node_start_pfn");
937
OFFSET_INIT(pglist_data.node_spanned_pages, "pglist_data",
938
"node_spanned_pages");
939
OFFSET_INIT(pglist_data.pgdat_next, "pglist_data", "pgdat_next");
942
* Get offsets of the zone's members.
944
SIZE_INIT(zone, "zone");
945
OFFSET_INIT(zone.free_pages, "zone", "free_pages");
946
OFFSET_INIT(zone.free_area, "zone", "free_area");
947
OFFSET_INIT(zone.vm_stat, "zone", "vm_stat");
948
OFFSET_INIT(zone.spanned_pages, "zone", "spanned_pages");
949
MEMBER_ARRAY_LENGTH_INIT(zone.free_area, "zone", "free_area");
952
* Get offsets of the free_area's members.
954
SIZE_INIT(free_area, "free_area");
955
OFFSET_INIT(free_area.free_list, "free_area", "free_list");
956
MEMBER_ARRAY_LENGTH_INIT(free_area.free_list, "free_area", "free_list");
959
* Get offsets of the list_head's members.
961
SIZE_INIT(list_head, "list_head");
962
OFFSET_INIT(list_head.next, "list_head", "next");
963
OFFSET_INIT(list_head.prev, "list_head", "prev");
966
* Get offsets of the node_memblk_s's members.
968
SIZE_INIT(node_memblk_s, "node_memblk_s");
969
OFFSET_INIT(node_memblk_s.start_paddr, "node_memblk_s", "start_paddr");
970
OFFSET_INIT(node_memblk_s.size, "node_memblk_s", "size");
971
OFFSET_INIT(node_memblk_s.nid, "node_memblk_s", "nid");
973
OFFSET_INIT(vm_struct.addr, "vm_struct", "addr");
976
* Get offset of the module members.
978
SIZE_INIT(module, "module");
979
OFFSET_INIT(module.strtab, "module", "strtab");
980
OFFSET_INIT(module.symtab, "module", "symtab");
981
OFFSET_INIT(module.num_symtab, "module", "num_symtab");
982
OFFSET_INIT(module.list, "module", "list");
983
OFFSET_INIT(module.name, "module", "name");
984
OFFSET_INIT(module.module_core, "module", "module_core");
985
OFFSET_INIT(module.core_size, "module", "core_size");
986
OFFSET_INIT(module.module_init, "module", "module_init");
987
OFFSET_INIT(module.init_size, "module", "init_size");
989
ENUM_NUMBER_INIT(NR_FREE_PAGES, "NR_FREE_PAGES");
990
ENUM_NUMBER_INIT(N_ONLINE, "N_ONLINE");
992
ENUM_NUMBER_INIT(PG_lru, "PG_lru");
993
ENUM_NUMBER_INIT(PG_private, "PG_private");
994
ENUM_NUMBER_INIT(PG_swapcache, "PG_swapcache");
995
ENUM_NUMBER_INIT(PG_buddy, "PG_buddy");
996
ENUM_NUMBER_INIT(PG_slab, "PG_slab");
998
ENUM_TYPE_SIZE_INIT(pageflags, "pageflags");
1000
TYPEDEF_SIZE_INIT(nodemask_t, "nodemask_t");
1002
SIZE_INIT(percpu_data, "percpu_data");
1005
* Get offset of the elf_prstatus members.
1007
SIZE_INIT(elf_prstatus, "elf_prstatus");
1008
OFFSET_INIT(elf_prstatus.pr_reg, "elf_prstatus", "pr_reg");
1011
* Get size of cpumask and cpumask_t.
1013
SIZE_INIT(cpumask, "cpumask");
1015
TYPEDEF_SIZE_INIT(cpumask_t, "cpumask_t");
1018
* Get offset of the user_regs_struct members.
1020
SIZE_INIT(user_regs_struct, "user_regs_struct");
1023
if (SIZE(user_regs_struct) != NOT_FOUND_STRUCTURE) {
1024
OFFSET_INIT(user_regs_struct.bx, "user_regs_struct", "bx");
1025
OFFSET_INIT(user_regs_struct.cx, "user_regs_struct", "cx");
1026
OFFSET_INIT(user_regs_struct.dx, "user_regs_struct", "dx");
1027
OFFSET_INIT(user_regs_struct.si, "user_regs_struct", "si");
1028
OFFSET_INIT(user_regs_struct.di, "user_regs_struct", "di");
1029
OFFSET_INIT(user_regs_struct.bp, "user_regs_struct", "bp");
1030
OFFSET_INIT(user_regs_struct.ax, "user_regs_struct", "ax");
1031
OFFSET_INIT(user_regs_struct.ds, "user_regs_struct", "ds");
1032
OFFSET_INIT(user_regs_struct.es, "user_regs_struct", "es");
1033
OFFSET_INIT(user_regs_struct.fs, "user_regs_struct", "fs");
1034
OFFSET_INIT(user_regs_struct.gs, "user_regs_struct", "gs");
1035
OFFSET_INIT(user_regs_struct.orig_ax, "user_regs_struct",
1037
OFFSET_INIT(user_regs_struct.ip, "user_regs_struct", "ip");
1038
OFFSET_INIT(user_regs_struct.cs, "user_regs_struct", "cs");
1039
OFFSET_INIT(user_regs_struct.flags, "user_regs_struct",
1041
OFFSET_INIT(user_regs_struct.sp, "user_regs_struct", "sp");
1042
OFFSET_INIT(user_regs_struct.ss, "user_regs_struct", "ss");
1044
if (OFFSET(user_regs_struct.bx) == NOT_FOUND_STRUCTURE)
1045
OFFSET_INIT(user_regs_struct.bx, "user_regs_struct", "ebx");
1046
if (OFFSET(user_regs_struct.cx) == NOT_FOUND_STRUCTURE)
1047
OFFSET_INIT(user_regs_struct.cx, "user_regs_struct", "ecx");
1048
if (OFFSET(user_regs_struct.dx) == NOT_FOUND_STRUCTURE)
1049
OFFSET_INIT(user_regs_struct.dx, "user_regs_struct", "edx");
1050
if (OFFSET(user_regs_struct.si) == NOT_FOUND_STRUCTURE)
1051
OFFSET_INIT(user_regs_struct.si, "user_regs_struct", "esi");
1052
if (OFFSET(user_regs_struct.di) == NOT_FOUND_STRUCTURE)
1053
OFFSET_INIT(user_regs_struct.di, "user_regs_struct", "edi");
1054
if (OFFSET(user_regs_struct.bp) == NOT_FOUND_STRUCTURE)
1055
OFFSET_INIT(user_regs_struct.bp, "user_regs_struct", "ebp");
1056
if (OFFSET(user_regs_struct.ax) == NOT_FOUND_STRUCTURE)
1057
OFFSET_INIT(user_regs_struct.ax, "user_regs_struct", "eax");
1058
if (OFFSET(user_regs_struct.orig_ax) == NOT_FOUND_STRUCTURE)
1059
OFFSET_INIT(user_regs_struct.orig_ax, "user_regs_struct", "orig_eax");
1060
if (OFFSET(user_regs_struct.ip) == NOT_FOUND_STRUCTURE)
1061
OFFSET_INIT(user_regs_struct.ip, "user_regs_struct", "eip");
1062
if (OFFSET(user_regs_struct.flags) == NOT_FOUND_STRUCTURE)
1063
OFFSET_INIT(user_regs_struct.flags, "user_regs_struct", "eflags");
1064
if (OFFSET(user_regs_struct.sp) == NOT_FOUND_STRUCTURE)
1065
OFFSET_INIT(user_regs_struct.sp, "user_regs_struct", "esp");
1068
* Note: Sometimes kernel debuginfo doesn't contain
1069
* user_regs_struct structure information. Instead, we
1070
* take offsets from actual datatype.
1072
OFFSET(user_regs_struct.bx) = offsetof(struct user_regs_struct, bx);
1073
OFFSET(user_regs_struct.cx) = offsetof(struct user_regs_struct, cx);
1074
OFFSET(user_regs_struct.dx) = offsetof(struct user_regs_struct, dx);
1075
OFFSET(user_regs_struct.si) = offsetof(struct user_regs_struct, si);
1076
OFFSET(user_regs_struct.di) = offsetof(struct user_regs_struct, di);
1077
OFFSET(user_regs_struct.bp) = offsetof(struct user_regs_struct, bp);
1078
OFFSET(user_regs_struct.ax) = offsetof(struct user_regs_struct, ax);
1079
OFFSET(user_regs_struct.ds) = offsetof(struct user_regs_struct, ds);
1080
OFFSET(user_regs_struct.es) = offsetof(struct user_regs_struct, es);
1081
OFFSET(user_regs_struct.fs) = offsetof(struct user_regs_struct, fs);
1082
OFFSET(user_regs_struct.gs) = offsetof(struct user_regs_struct, gs);
1083
OFFSET(user_regs_struct.orig_ax) = offsetof(struct user_regs_struct, orig_ax);
1084
OFFSET(user_regs_struct.ip) = offsetof(struct user_regs_struct, ip);
1085
OFFSET(user_regs_struct.cs) = offsetof(struct user_regs_struct, cs);
1086
OFFSET(user_regs_struct.flags) = offsetof(struct user_regs_struct, flags);
1087
OFFSET(user_regs_struct.sp) = offsetof(struct user_regs_struct, sp);
1088
OFFSET(user_regs_struct.ss) = offsetof(struct user_regs_struct, ss);
1090
#endif /* __x86__ */
1093
if (SIZE(user_regs_struct) != NOT_FOUND_STRUCTURE) {
1094
OFFSET_INIT(user_regs_struct.r15, "user_regs_struct", "r15");
1095
OFFSET_INIT(user_regs_struct.r14, "user_regs_struct", "r14");
1096
OFFSET_INIT(user_regs_struct.r13, "user_regs_struct", "r13");
1097
OFFSET_INIT(user_regs_struct.r12, "user_regs_struct", "r12");
1098
OFFSET_INIT(user_regs_struct.bp, "user_regs_struct", "bp");
1099
OFFSET_INIT(user_regs_struct.bx, "user_regs_struct", "bx");
1100
OFFSET_INIT(user_regs_struct.r11, "user_regs_struct", "r11");
1101
OFFSET_INIT(user_regs_struct.r10, "user_regs_struct", "r10");
1102
OFFSET_INIT(user_regs_struct.r9, "user_regs_struct", "r9");
1103
OFFSET_INIT(user_regs_struct.r8, "user_regs_struct", "r8");
1104
OFFSET_INIT(user_regs_struct.ax, "user_regs_struct", "ax");
1105
OFFSET_INIT(user_regs_struct.cx, "user_regs_struct", "cx");
1106
OFFSET_INIT(user_regs_struct.dx, "user_regs_struct", "dx");
1107
OFFSET_INIT(user_regs_struct.si, "user_regs_struct", "si");
1108
OFFSET_INIT(user_regs_struct.di, "user_regs_struct", "di");
1109
OFFSET_INIT(user_regs_struct.orig_ax, "user_regs_struct",
1111
OFFSET_INIT(user_regs_struct.ip, "user_regs_struct", "ip");
1112
OFFSET_INIT(user_regs_struct.cs, "user_regs_struct", "cs");
1113
OFFSET_INIT(user_regs_struct.flags, "user_regs_struct",
1115
OFFSET_INIT(user_regs_struct.sp, "user_regs_struct", "sp");
1116
OFFSET_INIT(user_regs_struct.ss, "user_regs_struct", "ss");
1117
OFFSET_INIT(user_regs_struct.fs_base, "user_regs_struct",
1119
OFFSET_INIT(user_regs_struct.gs_base, "user_regs_struct",
1121
OFFSET_INIT(user_regs_struct.ds, "user_regs_struct", "ds");
1122
OFFSET_INIT(user_regs_struct.es, "user_regs_struct", "es");
1123
OFFSET_INIT(user_regs_struct.fs, "user_regs_struct", "fs");
1124
OFFSET_INIT(user_regs_struct.gs, "user_regs_struct", "gs");
1127
* Note: Sometimes kernel debuginfo doesn't contain
1128
* user_regs_struct structure information. Instead, we
1129
* take offsets from actual datatype.
1131
OFFSET(user_regs_struct.r15) = offsetof(struct user_regs_struct, r15);
1132
OFFSET(user_regs_struct.r14) = offsetof(struct user_regs_struct, r14);
1133
OFFSET(user_regs_struct.r13) = offsetof(struct user_regs_struct, r13);
1134
OFFSET(user_regs_struct.r12) = offsetof(struct user_regs_struct, r12);
1135
OFFSET(user_regs_struct.bp) = offsetof(struct user_regs_struct, bp);
1136
OFFSET(user_regs_struct.bx) = offsetof(struct user_regs_struct, bx);
1137
OFFSET(user_regs_struct.r11) = offsetof(struct user_regs_struct, r11);
1138
OFFSET(user_regs_struct.r10) = offsetof(struct user_regs_struct, r10);
1139
OFFSET(user_regs_struct.r9) = offsetof(struct user_regs_struct, r9);
1140
OFFSET(user_regs_struct.r8) = offsetof(struct user_regs_struct, r8);
1141
OFFSET(user_regs_struct.ax) = offsetof(struct user_regs_struct, ax);
1142
OFFSET(user_regs_struct.cx) = offsetof(struct user_regs_struct, cx);
1143
OFFSET(user_regs_struct.dx) = offsetof(struct user_regs_struct, dx);
1144
OFFSET(user_regs_struct.si) = offsetof(struct user_regs_struct, si);
1145
OFFSET(user_regs_struct.di) = offsetof(struct user_regs_struct, di);
1146
OFFSET(user_regs_struct.orig_ax) = offsetof(struct user_regs_struct, orig_ax);
1147
OFFSET(user_regs_struct.ip) = offsetof(struct user_regs_struct, ip);
1148
OFFSET(user_regs_struct.cs) = offsetof(struct user_regs_struct, cs);
1149
OFFSET(user_regs_struct.flags) = offsetof(struct user_regs_struct, flags);
1150
OFFSET(user_regs_struct.sp) = offsetof(struct user_regs_struct, sp);
1151
OFFSET(user_regs_struct.ss) = offsetof(struct user_regs_struct, ss);
1152
OFFSET(user_regs_struct.fs_base) = offsetof(struct user_regs_struct, fs_base);
1153
OFFSET(user_regs_struct.gs_base) = offsetof(struct user_regs_struct, gs_base);
1154
OFFSET(user_regs_struct.ds) = offsetof(struct user_regs_struct, ds);
1155
OFFSET(user_regs_struct.es) = offsetof(struct user_regs_struct, es);
1156
OFFSET(user_regs_struct.fs) = offsetof(struct user_regs_struct, fs);
1157
OFFSET(user_regs_struct.gs) = offsetof(struct user_regs_struct, gs);
1159
#endif /* __x86_64__ */
1161
OFFSET_INIT(kimage.segment, "kimage", "segment");
1163
MEMBER_ARRAY_LENGTH_INIT(kimage.segment, "kimage", "segment");
1165
SIZE_INIT(kexec_segment, "kexec_segment");
1166
OFFSET_INIT(kexec_segment.mem, "kexec_segment", "mem");
1168
OFFSET_INIT(elf64_hdr.e_phnum, "elf64_hdr", "e_phnum");
1169
OFFSET_INIT(elf64_hdr.e_phentsize, "elf64_hdr", "e_phentsize");
1170
OFFSET_INIT(elf64_hdr.e_phoff, "elf64_hdr", "e_phoff");
1172
SIZE_INIT(elf64_hdr, "elf64_hdr");
1173
OFFSET_INIT(elf64_phdr.p_type, "elf64_phdr", "p_type");
1174
OFFSET_INIT(elf64_phdr.p_offset, "elf64_phdr", "p_offset");
1175
OFFSET_INIT(elf64_phdr.p_paddr, "elf64_phdr", "p_paddr");
1176
OFFSET_INIT(elf64_phdr.p_memsz, "elf64_phdr", "p_memsz");
1182
get_srcfile_info(void)
1184
TYPEDEF_SRCFILE_INIT(pud_t, "pud_t");
1190
get_value_for_old_linux(void)
1192
if (NUMBER(PG_lru) == NOT_FOUND_NUMBER)
1193
NUMBER(PG_lru) = PG_lru_ORIGINAL;
1194
if (NUMBER(PG_private) == NOT_FOUND_NUMBER)
1195
NUMBER(PG_private) = PG_private_ORIGINAL;
1196
if (NUMBER(PG_swapcache) == NOT_FOUND_NUMBER)
1197
NUMBER(PG_swapcache) = PG_swapcache_ORIGINAL;
1198
if (NUMBER(PG_slab) == NOT_FOUND_NUMBER)
1199
NUMBER(PG_slab) = PG_slab_ORIGINAL;
1201
* The values from here are for free page filtering based on
1202
* mem_map array. These are minimum effort to cover old
1205
* The logic also needs offset values for some members of page
1206
* structure. But it much depends on kernel versions. We avoid
1207
* to hard code the values.
1209
if (NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE) == NOT_FOUND_NUMBER) {
1210
if (info->kernel_version == KERNEL_VERSION(2, 6, 38))
1211
NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE) =
1212
PAGE_BUDDY_MAPCOUNT_VALUE_v2_6_38;
1213
if (info->kernel_version >= KERNEL_VERSION(2, 6, 39))
1214
NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE) =
1215
PAGE_BUDDY_MAPCOUNT_VALUE_v2_6_39_to_latest_version;
1217
if (SIZE(pageflags) == NOT_FOUND_STRUCTURE) {
1218
if (info->kernel_version >= KERNEL_VERSION(2, 6, 27))
1220
PAGE_FLAGS_SIZE_v2_6_27_to_latest_version;
1226
get_str_osrelease_from_vmlinux(void)
1230
struct utsname system_utsname;
1231
unsigned long long utsname;
1233
const off_t failed = (off_t)-1;
1236
* Get the kernel version.
1238
if (SYMBOL(system_utsname) != NOT_FOUND_SYMBOL) {
1239
utsname = SYMBOL(system_utsname);
1240
} else if (SYMBOL(init_uts_ns) != NOT_FOUND_SYMBOL) {
1241
utsname = SYMBOL(init_uts_ns) + sizeof(int);
1243
ERRMSG("Can't get the symbol of system_utsname.\n");
1246
get_fileinfo_of_debuginfo(&fd, &name);
1248
offset = vaddr_to_offset_slow(fd, name, utsname);
1250
ERRMSG("Can't convert vaddr (%llx) of utsname to an offset.\n",
1254
if (lseek(fd, offset, SEEK_SET) == failed) {
1255
ERRMSG("Can't seek %s. %s\n", name, strerror(errno));
1258
if (read(fd, &system_utsname, sizeof system_utsname)
1259
!= sizeof system_utsname) {
1260
ERRMSG("Can't read %s. %s\n", name, strerror(errno));
1263
if (!strncpy(info->release, system_utsname.release, STRLEN_OSRELEASE)){
1264
ERRMSG("Can't do strncpy for osrelease.");
1271
is_sparsemem_extreme(void)
1273
if (ARRAY_LENGTH(mem_section)
1274
== (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME()))
1285
if ((SIZE(page) == NOT_FOUND_STRUCTURE)
1286
|| (OFFSET(page.flags) == NOT_FOUND_STRUCTURE)
1287
|| (OFFSET(page._count) == NOT_FOUND_STRUCTURE)
1288
|| (OFFSET(page.mapping) == NOT_FOUND_STRUCTURE)) {
1289
ret = NOT_FOUND_MEMTYPE;
1290
} else if ((((SYMBOL(node_data) != NOT_FOUND_SYMBOL)
1291
&& (ARRAY_LENGTH(node_data) != NOT_FOUND_STRUCTURE))
1292
|| ((SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
1293
&& (OFFSET(pglist_data.pgdat_next) != NOT_FOUND_STRUCTURE))
1294
|| ((SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
1295
&& (ARRAY_LENGTH(pgdat_list) != NOT_FOUND_STRUCTURE)))
1296
&& (SIZE(pglist_data) != NOT_FOUND_STRUCTURE)
1297
&& (OFFSET(pglist_data.node_mem_map) != NOT_FOUND_STRUCTURE)
1298
&& (OFFSET(pglist_data.node_start_pfn) != NOT_FOUND_STRUCTURE)
1299
&& (OFFSET(pglist_data.node_spanned_pages) !=NOT_FOUND_STRUCTURE)){
1301
} else if ((SYMBOL(mem_section) != NOT_FOUND_SYMBOL)
1302
&& (SIZE(mem_section) != NOT_FOUND_STRUCTURE)
1303
&& (OFFSET(mem_section.section_mem_map) != NOT_FOUND_STRUCTURE)
1304
&& (ARRAY_LENGTH(mem_section) != NOT_FOUND_STRUCTURE)) {
1305
if (is_sparsemem_extreme())
1309
} else if (SYMBOL(mem_map) != NOT_FOUND_SYMBOL) {
1312
ret = NOT_FOUND_MEMTYPE;
1319
write_vmcoreinfo_data(void)
1322
* write 1st kernel's OSRELEASE
1324
fprintf(info->file_vmcoreinfo, "%s%s\n", STR_OSRELEASE,
1328
* write 1st kernel's PAGESIZE
1330
fprintf(info->file_vmcoreinfo, "%s%ld\n", STR_PAGESIZE,
1334
* write the symbol of 1st kernel
1336
WRITE_SYMBOL("mem_map", mem_map);
1337
WRITE_SYMBOL("vmem_map", vmem_map);
1338
WRITE_SYMBOL("mem_section", mem_section);
1339
WRITE_SYMBOL("pkmap_count", pkmap_count);
1340
WRITE_SYMBOL("pkmap_count_next", pkmap_count_next);
1341
WRITE_SYMBOL("system_utsname", system_utsname);
1342
WRITE_SYMBOL("init_uts_ns", init_uts_ns);
1343
WRITE_SYMBOL("_stext", _stext);
1344
WRITE_SYMBOL("swapper_pg_dir", swapper_pg_dir);
1345
WRITE_SYMBOL("init_level4_pgt", init_level4_pgt);
1346
WRITE_SYMBOL("vmlist", vmlist);
1347
WRITE_SYMBOL("phys_base", phys_base);
1348
WRITE_SYMBOL("node_online_map", node_online_map);
1349
WRITE_SYMBOL("node_states", node_states);
1350
WRITE_SYMBOL("node_data", node_data);
1351
WRITE_SYMBOL("pgdat_list", pgdat_list);
1352
WRITE_SYMBOL("contig_page_data", contig_page_data);
1353
WRITE_SYMBOL("log_buf", log_buf);
1354
WRITE_SYMBOL("log_buf_len", log_buf_len);
1355
WRITE_SYMBOL("log_end", log_end);
1356
WRITE_SYMBOL("max_pfn", max_pfn);
1357
WRITE_SYMBOL("high_memory", high_memory);
1358
WRITE_SYMBOL("node_remap_start_vaddr", node_remap_start_vaddr);
1359
WRITE_SYMBOL("node_remap_end_vaddr", node_remap_end_vaddr);
1360
WRITE_SYMBOL("node_remap_start_pfn", node_remap_start_pfn);
1363
* write the structure size of 1st kernel
1365
WRITE_STRUCTURE_SIZE("page", page);
1366
WRITE_STRUCTURE_SIZE("mem_section", mem_section);
1367
WRITE_STRUCTURE_SIZE("pglist_data", pglist_data);
1368
WRITE_STRUCTURE_SIZE("zone", zone);
1369
WRITE_STRUCTURE_SIZE("free_area", free_area);
1370
WRITE_STRUCTURE_SIZE("list_head", list_head);
1371
WRITE_STRUCTURE_SIZE("node_memblk_s", node_memblk_s);
1372
WRITE_STRUCTURE_SIZE("nodemask_t", nodemask_t);
1373
WRITE_STRUCTURE_SIZE("pageflags", pageflags);
1376
* write the member offset of 1st kernel
1378
WRITE_MEMBER_OFFSET("page.flags", page.flags);
1379
WRITE_MEMBER_OFFSET("page._count", page._count);
1380
WRITE_MEMBER_OFFSET("page.mapping", page.mapping);
1381
WRITE_MEMBER_OFFSET("page.lru", page.lru);
1382
WRITE_MEMBER_OFFSET("page._mapcount", page._mapcount);
1383
WRITE_MEMBER_OFFSET("page.private", page.private);
1384
WRITE_MEMBER_OFFSET("mem_section.section_mem_map",
1385
mem_section.section_mem_map);
1386
WRITE_MEMBER_OFFSET("pglist_data.node_zones", pglist_data.node_zones);
1387
WRITE_MEMBER_OFFSET("pglist_data.nr_zones", pglist_data.nr_zones);
1388
WRITE_MEMBER_OFFSET("pglist_data.node_mem_map",
1389
pglist_data.node_mem_map);
1390
WRITE_MEMBER_OFFSET("pglist_data.node_start_pfn",
1391
pglist_data.node_start_pfn);
1392
WRITE_MEMBER_OFFSET("pglist_data.node_spanned_pages",
1393
pglist_data.node_spanned_pages);
1394
WRITE_MEMBER_OFFSET("pglist_data.pgdat_next", pglist_data.pgdat_next);
1395
WRITE_MEMBER_OFFSET("zone.free_pages", zone.free_pages);
1396
WRITE_MEMBER_OFFSET("zone.free_area", zone.free_area);
1397
WRITE_MEMBER_OFFSET("zone.vm_stat", zone.vm_stat);
1398
WRITE_MEMBER_OFFSET("zone.spanned_pages", zone.spanned_pages);
1399
WRITE_MEMBER_OFFSET("free_area.free_list", free_area.free_list);
1400
WRITE_MEMBER_OFFSET("list_head.next", list_head.next);
1401
WRITE_MEMBER_OFFSET("list_head.prev", list_head.prev);
1402
WRITE_MEMBER_OFFSET("node_memblk_s.start_paddr", node_memblk_s.start_paddr);
1403
WRITE_MEMBER_OFFSET("node_memblk_s.size", node_memblk_s.size);
1404
WRITE_MEMBER_OFFSET("node_memblk_s.nid", node_memblk_s.nid);
1405
WRITE_MEMBER_OFFSET("vm_struct.addr", vm_struct.addr);
1407
if (SYMBOL(node_data) != NOT_FOUND_SYMBOL)
1408
WRITE_ARRAY_LENGTH("node_data", node_data);
1409
if (SYMBOL(pgdat_list) != NOT_FOUND_SYMBOL)
1410
WRITE_ARRAY_LENGTH("pgdat_list", pgdat_list);
1411
if (SYMBOL(mem_section) != NOT_FOUND_SYMBOL)
1412
WRITE_ARRAY_LENGTH("mem_section", mem_section);
1413
if (SYMBOL(node_memblk) != NOT_FOUND_SYMBOL)
1414
WRITE_ARRAY_LENGTH("node_memblk", node_memblk);
1415
if (SYMBOL(node_remap_start_pfn) != NOT_FOUND_SYMBOL)
1416
WRITE_ARRAY_LENGTH("node_remap_start_pfn",
1417
node_remap_start_pfn);
1419
WRITE_ARRAY_LENGTH("zone.free_area", zone.free_area);
1420
WRITE_ARRAY_LENGTH("free_area.free_list", free_area.free_list);
1422
WRITE_NUMBER("NR_FREE_PAGES", NR_FREE_PAGES);
1423
WRITE_NUMBER("N_ONLINE", N_ONLINE);
1425
WRITE_NUMBER("PG_lru", PG_lru);
1426
WRITE_NUMBER("PG_private", PG_private);
1427
WRITE_NUMBER("PG_swapcache", PG_swapcache);
1428
WRITE_NUMBER("PG_buddy", PG_buddy);
1429
WRITE_NUMBER("PG_slab", PG_slab);
1431
WRITE_NUMBER("PAGE_BUDDY_MAPCOUNT_VALUE", PAGE_BUDDY_MAPCOUNT_VALUE);
1434
* write the source file of 1st kernel
1436
WRITE_SRCFILE("pud_t", pud_t);
1440
generate_vmcoreinfo(void)
1442
if (!set_page_size(sysconf(_SC_PAGE_SIZE)))
1445
set_dwarf_debuginfo("vmlinux", NULL,
1446
info->name_vmlinux, info->fd_vmlinux);
1448
if (!get_symbol_info())
1451
if (!get_structure_info())
1454
if (!get_srcfile_info())
1457
if ((SYMBOL(system_utsname) == NOT_FOUND_SYMBOL)
1458
&& (SYMBOL(init_uts_ns) == NOT_FOUND_SYMBOL)) {
1459
ERRMSG("Can't get the symbol of system_utsname.\n");
1462
if (!get_str_osrelease_from_vmlinux())
1465
if (!(info->kernel_version = get_kernel_version(info->release)))
1468
if (get_mem_type() == NOT_FOUND_MEMTYPE) {
1469
ERRMSG("Can't find the memory type.\n");
1473
write_vmcoreinfo_data();
1479
read_vmcoreinfo_basic_info(void)
1482
long page_size = FALSE;
1483
char buf[BUFSIZE_FGETS], *endp;
1484
unsigned int get_release = FALSE, i;
1486
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
1487
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
1488
info->name_vmcoreinfo, strerror(errno));
1492
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
1496
if (buf[i - 1] == '\n')
1498
if (strncmp(buf, STR_OSRELEASE, strlen(STR_OSRELEASE)) == 0) {
1500
/* if the release have been stored, skip this time. */
1501
if (strlen(info->release))
1503
strcpy(info->release, buf + strlen(STR_OSRELEASE));
1505
if (strncmp(buf, STR_PAGESIZE, strlen(STR_PAGESIZE)) == 0) {
1506
page_size = strtol(buf+strlen(STR_PAGESIZE),&endp,10);
1507
if ((!page_size || page_size == LONG_MAX)
1508
|| strlen(endp) != 0) {
1509
ERRMSG("Invalid data in %s: %s",
1510
info->name_vmcoreinfo, buf);
1513
if (!set_page_size(page_size)) {
1514
ERRMSG("Invalid data in %s: %s",
1515
info->name_vmcoreinfo, buf);
1519
if (strncmp(buf, STR_CRASHTIME, strlen(STR_CRASHTIME)) == 0) {
1520
tv_sec = strtol(buf+strlen(STR_CRASHTIME),&endp,10);
1521
if ((!tv_sec || tv_sec == LONG_MAX)
1522
|| strlen(endp) != 0) {
1523
ERRMSG("Invalid data in %s: %s",
1524
info->name_vmcoreinfo, buf);
1527
info->timestamp.tv_sec = tv_sec;
1529
if (strncmp(buf, STR_CONFIG_X86_PAE,
1530
strlen(STR_CONFIG_X86_PAE)) == 0)
1531
vt.mem_flags |= MEMORY_X86_PAE;
1533
if (strncmp(buf, STR_CONFIG_PGTABLE_3,
1534
strlen(STR_CONFIG_PGTABLE_3)) == 0)
1535
vt.mem_flags |= MEMORY_PAGETABLE_3L;
1537
if (strncmp(buf, STR_CONFIG_PGTABLE_4,
1538
strlen(STR_CONFIG_PGTABLE_4)) == 0)
1539
vt.mem_flags |= MEMORY_PAGETABLE_4L;
1541
if (!get_release || !info->page_size) {
1542
ERRMSG("Invalid format in %s", info->name_vmcoreinfo);
1549
read_vmcoreinfo_symbol(char *str_symbol)
1551
unsigned long symbol = NOT_FOUND_SYMBOL;
1552
char buf[BUFSIZE_FGETS], *endp;
1555
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
1556
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
1557
info->name_vmcoreinfo, strerror(errno));
1558
return INVALID_SYMBOL_DATA;
1561
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
1565
if (buf[i - 1] == '\n')
1567
if (strncmp(buf, str_symbol, strlen(str_symbol)) == 0) {
1568
symbol = strtoul(buf + strlen(str_symbol), &endp, 16);
1569
if ((!symbol || symbol == ULONG_MAX)
1570
|| strlen(endp) != 0) {
1571
ERRMSG("Invalid data in %s: %s",
1572
info->name_vmcoreinfo, buf);
1573
return INVALID_SYMBOL_DATA;
1582
read_vmcoreinfo_long(char *str_structure)
1584
long data = NOT_FOUND_LONG_VALUE;
1585
char buf[BUFSIZE_FGETS], *endp;
1588
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
1589
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
1590
info->name_vmcoreinfo, strerror(errno));
1591
return INVALID_STRUCTURE_DATA;
1594
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
1598
if (buf[i - 1] == '\n')
1600
if (strncmp(buf, str_structure, strlen(str_structure)) == 0) {
1601
data = strtol(buf + strlen(str_structure), &endp, 10);
1602
if ((data == LONG_MAX) || strlen(endp) != 0) {
1603
ERRMSG("Invalid data in %s: %s",
1604
info->name_vmcoreinfo, buf);
1605
return INVALID_STRUCTURE_DATA;
1614
read_vmcoreinfo_string(char *str_in, char *str_out)
1616
char buf[BUFSIZE_FGETS];
1619
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
1620
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
1621
info->name_vmcoreinfo, strerror(errno));
1625
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
1629
if (buf[i - 1] == '\n')
1631
if (strncmp(buf, str_in, strlen(str_in)) == 0) {
1632
strncpy(str_out, buf + strlen(str_in), LEN_SRCFILE - strlen(str_in));
1640
read_vmcoreinfo(void)
1642
if (!read_vmcoreinfo_basic_info())
1645
READ_SYMBOL("mem_map", mem_map);
1646
READ_SYMBOL("vmem_map", vmem_map);
1647
READ_SYMBOL("mem_section", mem_section);
1648
READ_SYMBOL("pkmap_count", pkmap_count);
1649
READ_SYMBOL("pkmap_count_next", pkmap_count_next);
1650
READ_SYMBOL("system_utsname", system_utsname);
1651
READ_SYMBOL("init_uts_ns", init_uts_ns);
1652
READ_SYMBOL("_stext", _stext);
1653
READ_SYMBOL("swapper_pg_dir", swapper_pg_dir);
1654
READ_SYMBOL("init_level4_pgt", init_level4_pgt);
1655
READ_SYMBOL("vmlist", vmlist);
1656
READ_SYMBOL("phys_base", phys_base);
1657
READ_SYMBOL("node_online_map", node_online_map);
1658
READ_SYMBOL("node_states", node_states);
1659
READ_SYMBOL("node_data", node_data);
1660
READ_SYMBOL("pgdat_list", pgdat_list);
1661
READ_SYMBOL("contig_page_data", contig_page_data);
1662
READ_SYMBOL("log_buf", log_buf);
1663
READ_SYMBOL("log_buf_len", log_buf_len);
1664
READ_SYMBOL("log_end", log_end);
1665
READ_SYMBOL("max_pfn", max_pfn);
1666
READ_SYMBOL("high_memory", high_memory);
1667
READ_SYMBOL("node_remap_start_vaddr", node_remap_start_vaddr);
1668
READ_SYMBOL("node_remap_end_vaddr", node_remap_end_vaddr);
1669
READ_SYMBOL("node_remap_start_pfn", node_remap_start_pfn);
1671
READ_STRUCTURE_SIZE("page", page);
1672
READ_STRUCTURE_SIZE("mem_section", mem_section);
1673
READ_STRUCTURE_SIZE("pglist_data", pglist_data);
1674
READ_STRUCTURE_SIZE("zone", zone);
1675
READ_STRUCTURE_SIZE("free_area", free_area);
1676
READ_STRUCTURE_SIZE("list_head", list_head);
1677
READ_STRUCTURE_SIZE("node_memblk_s", node_memblk_s);
1678
READ_STRUCTURE_SIZE("nodemask_t", nodemask_t);
1679
READ_STRUCTURE_SIZE("pageflags", pageflags);
1681
READ_MEMBER_OFFSET("page.flags", page.flags);
1682
READ_MEMBER_OFFSET("page._count", page._count);
1683
READ_MEMBER_OFFSET("page.mapping", page.mapping);
1684
READ_MEMBER_OFFSET("page.lru", page.lru);
1685
READ_MEMBER_OFFSET("page._mapcount", page._mapcount);
1686
READ_MEMBER_OFFSET("page.private", page.private);
1687
READ_MEMBER_OFFSET("mem_section.section_mem_map",
1688
mem_section.section_mem_map);
1689
READ_MEMBER_OFFSET("pglist_data.node_zones", pglist_data.node_zones);
1690
READ_MEMBER_OFFSET("pglist_data.nr_zones", pglist_data.nr_zones);
1691
READ_MEMBER_OFFSET("pglist_data.node_mem_map",pglist_data.node_mem_map);
1692
READ_MEMBER_OFFSET("pglist_data.node_start_pfn",
1693
pglist_data.node_start_pfn);
1694
READ_MEMBER_OFFSET("pglist_data.node_spanned_pages",
1695
pglist_data.node_spanned_pages);
1696
READ_MEMBER_OFFSET("pglist_data.pgdat_next", pglist_data.pgdat_next);
1697
READ_MEMBER_OFFSET("zone.free_pages", zone.free_pages);
1698
READ_MEMBER_OFFSET("zone.free_area", zone.free_area);
1699
READ_MEMBER_OFFSET("zone.vm_stat", zone.vm_stat);
1700
READ_MEMBER_OFFSET("zone.spanned_pages", zone.spanned_pages);
1701
READ_MEMBER_OFFSET("free_area.free_list", free_area.free_list);
1702
READ_MEMBER_OFFSET("list_head.next", list_head.next);
1703
READ_MEMBER_OFFSET("list_head.prev", list_head.prev);
1704
READ_MEMBER_OFFSET("node_memblk_s.start_paddr", node_memblk_s.start_paddr);
1705
READ_MEMBER_OFFSET("node_memblk_s.size", node_memblk_s.size);
1706
READ_MEMBER_OFFSET("node_memblk_s.nid", node_memblk_s.nid);
1707
READ_MEMBER_OFFSET("vm_struct.addr", vm_struct.addr);
1709
READ_ARRAY_LENGTH("node_data", node_data);
1710
READ_ARRAY_LENGTH("pgdat_list", pgdat_list);
1711
READ_ARRAY_LENGTH("mem_section", mem_section);
1712
READ_ARRAY_LENGTH("node_memblk", node_memblk);
1713
READ_ARRAY_LENGTH("zone.free_area", zone.free_area);
1714
READ_ARRAY_LENGTH("free_area.free_list", free_area.free_list);
1715
READ_ARRAY_LENGTH("node_remap_start_pfn", node_remap_start_pfn);
1717
READ_NUMBER("NR_FREE_PAGES", NR_FREE_PAGES);
1718
READ_NUMBER("N_ONLINE", N_ONLINE);
1720
READ_NUMBER("PG_lru", PG_lru);
1721
READ_NUMBER("PG_private", PG_private);
1722
READ_NUMBER("PG_swapcache", PG_swapcache);
1723
READ_NUMBER("PG_slab", PG_slab);
1724
READ_NUMBER("PG_buddy", PG_buddy);
1725
READ_SRCFILE("pud_t", pud_t);
1727
READ_NUMBER("PAGE_BUDDY_MAPCOUNT_VALUE", PAGE_BUDDY_MAPCOUNT_VALUE);
1733
* Extract vmcoreinfo from /proc/vmcore and output it to /tmp/vmcoreinfo.tmp.
1736
copy_vmcoreinfo(off_t offset, unsigned long size)
1739
char buf[VMCOREINFO_BYTES];
1740
const off_t failed = (off_t)-1;
1742
if (!offset || !size)
1745
if ((fd = mkstemp(info->name_vmcoreinfo)) < 0) {
1746
ERRMSG("Can't open the vmcoreinfo file(%s). %s\n",
1747
info->name_vmcoreinfo, strerror(errno));
1750
if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
1751
ERRMSG("Can't seek the dump memory(%s). %s\n",
1752
info->name_memory, strerror(errno));
1755
if (read(info->fd_memory, &buf, size) != size) {
1756
ERRMSG("Can't read the dump memory(%s). %s\n",
1757
info->name_memory, strerror(errno));
1760
if (write(fd, &buf, size) != size) {
1761
ERRMSG("Can't write the vmcoreinfo file(%s). %s\n",
1762
info->name_vmcoreinfo, strerror(errno));
1765
if (close(fd) < 0) {
1766
ERRMSG("Can't close the vmcoreinfo file(%s). %s\n",
1767
info->name_vmcoreinfo, strerror(errno));
1774
read_vmcoreinfo_from_vmcore(off_t offset, unsigned long size, int flag_xen_hv)
1779
* Copy vmcoreinfo to /tmp/vmcoreinfoXXXXXX.
1781
if (!(info->name_vmcoreinfo = strdup(FILENAME_VMCOREINFO))) {
1782
MSG("Can't duplicate strings(%s).\n", FILENAME_VMCOREINFO);
1785
if (!copy_vmcoreinfo(offset, size))
1789
* Read vmcoreinfo from /tmp/vmcoreinfoXXXXXX.
1791
if (!open_vmcoreinfo("r"))
1794
unlink(info->name_vmcoreinfo);
1797
if (!read_vmcoreinfo_xen())
1800
if (!read_vmcoreinfo())
1807
free(info->name_vmcoreinfo);
1808
info->name_vmcoreinfo = NULL;
1814
* Get the number of online nodes.
1817
get_nodes_online(void)
1819
int len, i, j, online;
1820
unsigned long node_online_map = 0, bitbuf, *maskptr;
1822
if ((SYMBOL(node_online_map) == NOT_FOUND_SYMBOL)
1823
&& (SYMBOL(node_states) == NOT_FOUND_SYMBOL))
1826
if (SIZE(nodemask_t) == NOT_FOUND_STRUCTURE) {
1827
ERRMSG("Can't get the size of nodemask_t.\n");
1831
len = SIZE(nodemask_t);
1832
vt.node_online_map_len = len/sizeof(unsigned long);
1833
if (!(vt.node_online_map = (unsigned long *)malloc(len))) {
1834
ERRMSG("Can't allocate memory for the node online map. %s\n",
1838
if (SYMBOL(node_online_map) != NOT_FOUND_SYMBOL) {
1839
node_online_map = SYMBOL(node_online_map);
1840
} else if (SYMBOL(node_states) != NOT_FOUND_SYMBOL) {
1842
* For linux-2.6.23-rc4-mm1
1844
node_online_map = SYMBOL(node_states)
1845
+ (SIZE(nodemask_t) * NUMBER(N_ONLINE));
1847
if (!readmem(VADDR, node_online_map, vt.node_online_map, len)){
1848
ERRMSG("Can't get the node online map.\n");
1852
maskptr = (unsigned long *)vt.node_online_map;
1853
for (i = 0; i < vt.node_online_map_len; i++, maskptr++) {
1855
for (j = 0; j < sizeof(bitbuf) * 8; j++) {
1856
online += bitbuf & 1;
1857
bitbuf = bitbuf >> 1;
1866
if (!(vt.numnodes = get_nodes_online())) {
1870
DEBUG_MSG("num of NODEs : %d\n", vt.numnodes);
1877
next_online_node(int first)
1880
unsigned long mask, *maskptr;
1882
/* It cannot occur */
1883
if ((first/(sizeof(unsigned long) * 8)) >= vt.node_online_map_len) {
1884
ERRMSG("next_online_node: %d is too large!\n", first);
1888
maskptr = (unsigned long *)vt.node_online_map;
1889
for (i = node = 0; i < vt.node_online_map_len; i++, maskptr++) {
1891
for (j = 0; j < (sizeof(unsigned long) * 8); j++, node++) {
1903
next_online_pgdat(int node)
1906
unsigned long pgdat;
1909
* Get the pglist_data structure from symbol "node_data".
1910
* The array number of symbol "node_data" cannot be gotten
1911
* from vmlinux. Instead, check it is DW_TAG_array_type.
1913
if ((SYMBOL(node_data) == NOT_FOUND_SYMBOL)
1914
|| (ARRAY_LENGTH(node_data) == NOT_FOUND_STRUCTURE))
1917
if (!readmem(VADDR, SYMBOL(node_data) + (node * sizeof(void *)),
1918
&pgdat, sizeof pgdat))
1921
if (!is_kvaddr(pgdat))
1928
* Get the pglist_data structure from symbol "pgdat_list".
1930
if (SYMBOL(pgdat_list) == NOT_FOUND_SYMBOL)
1934
&& (ARRAY_LENGTH(pgdat_list) == NOT_FOUND_STRUCTURE))
1937
else if ((ARRAY_LENGTH(pgdat_list) != NOT_FOUND_STRUCTURE)
1938
&& (ARRAY_LENGTH(pgdat_list) < node))
1941
if (!readmem(VADDR, SYMBOL(pgdat_list) + (node * sizeof(void *)),
1942
&pgdat, sizeof pgdat))
1945
if (!is_kvaddr(pgdat))
1952
* linux-2.6.16 or former
1954
if ((SYMBOL(pgdat_list) == NOT_FOUND_SYMBOL)
1955
|| (OFFSET(pglist_data.pgdat_next) == NOT_FOUND_STRUCTURE))
1958
if (!readmem(VADDR, SYMBOL(pgdat_list), &pgdat, sizeof pgdat))
1961
if (!is_kvaddr(pgdat))
1967
for (i = 1; i <= node; i++) {
1968
if (!readmem(VADDR, pgdat+OFFSET(pglist_data.pgdat_next),
1969
&pgdat, sizeof pgdat))
1972
if (!is_kvaddr(pgdat))
1979
* Get the pglist_data structure from symbol "contig_page_data".
1981
if (SYMBOL(contig_page_data) == NOT_FOUND_SYMBOL)
1987
return SYMBOL(contig_page_data);
1991
dump_mem_map(unsigned long long pfn_start,
1992
unsigned long long pfn_end, unsigned long mem_map, int num_mm)
1994
struct mem_map_data *mmd;
1996
mmd = &info->mem_map_data[num_mm];
1997
mmd->pfn_start = pfn_start;
1998
mmd->pfn_end = pfn_end;
1999
mmd->mem_map = mem_map;
2001
DEBUG_MSG("mem_map (%d)\n", num_mm);
2002
DEBUG_MSG(" mem_map : %lx\n", mem_map);
2003
DEBUG_MSG(" pfn_start : %llx\n", pfn_start);
2004
DEBUG_MSG(" pfn_end : %llx\n", pfn_end);
2010
get_mm_flatmem(void)
2012
unsigned long mem_map;
2015
* Get the address of the symbol "mem_map".
2017
if (!readmem(VADDR, SYMBOL(mem_map), &mem_map, sizeof mem_map)
2019
ERRMSG("Can't get the address of mem_map.\n");
2022
info->num_mem_map = 1;
2023
if ((info->mem_map_data = (struct mem_map_data *)
2024
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
2025
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
2029
if (is_xen_memory())
2030
dump_mem_map(0, info->dom0_mapnr, mem_map, 0);
2032
dump_mem_map(0, info->max_mapnr, mem_map, 0);
2038
get_node_memblk(int num_memblk,
2039
unsigned long *start_paddr, unsigned long *size, int *nid)
2041
unsigned long node_memblk;
2043
if (ARRAY_LENGTH(node_memblk) <= num_memblk) {
2044
ERRMSG("Invalid num_memblk.\n");
2047
node_memblk = SYMBOL(node_memblk) + SIZE(node_memblk_s) * num_memblk;
2048
if (!readmem(VADDR, node_memblk+OFFSET(node_memblk_s.start_paddr),
2049
start_paddr, sizeof(unsigned long))) {
2050
ERRMSG("Can't get node_memblk_s.start_paddr.\n");
2053
if (!readmem(VADDR, node_memblk + OFFSET(node_memblk_s.size),
2054
size, sizeof(unsigned long))) {
2055
ERRMSG("Can't get node_memblk_s.size.\n");
2058
if (!readmem(VADDR, node_memblk + OFFSET(node_memblk_s.nid),
2059
nid, sizeof(int))) {
2060
ERRMSG("Can't get node_memblk_s.nid.\n");
2067
get_num_mm_discontigmem(void)
2070
unsigned long start_paddr, size;
2072
if ((SYMBOL(node_memblk) == NOT_FOUND_SYMBOL)
2073
|| (ARRAY_LENGTH(node_memblk) == NOT_FOUND_STRUCTURE)
2074
|| (SIZE(node_memblk_s) == NOT_FOUND_STRUCTURE)
2075
|| (OFFSET(node_memblk_s.start_paddr) == NOT_FOUND_STRUCTURE)
2076
|| (OFFSET(node_memblk_s.size) == NOT_FOUND_STRUCTURE)
2077
|| (OFFSET(node_memblk_s.nid) == NOT_FOUND_STRUCTURE)) {
2080
for (i = 0; i < ARRAY_LENGTH(node_memblk); i++) {
2081
if (!get_node_memblk(i, &start_paddr, &size, &nid)) {
2082
ERRMSG("Can't get the node_memblk (%d)\n", i);
2085
if (!start_paddr && !size &&!nid)
2088
DEBUG_MSG("nid : %d\n", nid);
2089
DEBUG_MSG(" start_paddr: %lx\n", start_paddr);
2090
DEBUG_MSG(" size : %lx\n", size);
2094
* On non-NUMA systems, node_memblk_s is not set.
2104
separate_mem_map(struct mem_map_data *mmd, int *id_mm, int nid_pgdat,
2105
unsigned long mem_map_pgdat, unsigned long pfn_start_pgdat)
2108
unsigned long start_paddr, size, pfn_start, pfn_end, mem_map;
2110
for (i = 0; i < ARRAY_LENGTH(node_memblk); i++) {
2111
if (!get_node_memblk(i, &start_paddr, &size, &nid)) {
2112
ERRMSG("Can't get the node_memblk (%d)\n", i);
2115
if (!start_paddr && !size && !nid)
2119
* Check pglist_data.node_id and node_memblk_s.nid match.
2121
if (nid_pgdat != nid)
2124
pfn_start = paddr_to_pfn(start_paddr);
2125
pfn_end = paddr_to_pfn(start_paddr + size);
2127
if (pfn_start < pfn_start_pgdat) {
2128
ERRMSG("node_memblk_s.start_paddr of node (%d) is invalid.\n", nid);
2131
if (info->max_mapnr < pfn_end) {
2132
DEBUG_MSG("pfn_end of node (%d) is over max_mapnr.\n",
2134
DEBUG_MSG(" pfn_start: %lx\n", pfn_start);
2135
DEBUG_MSG(" pfn_end : %lx\n", pfn_end);
2136
DEBUG_MSG(" max_mapnr: %llx\n", info->max_mapnr);
2138
pfn_end = info->max_mapnr;
2141
mem_map = mem_map_pgdat+SIZE(page)*(pfn_start-pfn_start_pgdat);
2143
mmd->pfn_start = pfn_start;
2144
mmd->pfn_end = pfn_end;
2145
mmd->mem_map = mem_map;
2154
get_mm_discontigmem(void)
2156
int i, j, id_mm, node, num_mem_map, separate_mm = FALSE;
2157
unsigned long pgdat, mem_map, pfn_start, pfn_end, node_spanned_pages;
2158
unsigned long vmem_map;
2159
struct mem_map_data temp_mmd;
2161
num_mem_map = get_num_mm_discontigmem();
2162
if (num_mem_map < vt.numnodes) {
2163
ERRMSG("Can't get the number of mem_map.\n");
2166
struct mem_map_data mmd[num_mem_map];
2167
if (vt.numnodes < num_mem_map) {
2173
* This note is only for ia64 discontigmem kernel.
2174
* It is better to take mem_map information from a symbol vmem_map
2175
* instead of pglist_data.node_mem_map, because some node_mem_map
2176
* sometimes does not have mem_map information corresponding to its
2179
if (SYMBOL(vmem_map) != NOT_FOUND_SYMBOL) {
2180
if (!readmem(VADDR, SYMBOL(vmem_map), &vmem_map, sizeof vmem_map)) {
2181
ERRMSG("Can't get vmem_map.\n");
2187
* Get the first node_id.
2189
if ((node = next_online_node(0)) < 0) {
2190
ERRMSG("Can't get next online node.\n");
2193
if (!(pgdat = next_online_pgdat(node))) {
2194
ERRMSG("Can't get pgdat list.\n");
2198
for (i = 0; i < vt.numnodes; i++) {
2199
if (!readmem(VADDR, pgdat + OFFSET(pglist_data.node_start_pfn),
2200
&pfn_start, sizeof pfn_start)) {
2201
ERRMSG("Can't get node_start_pfn.\n");
2204
if (!readmem(VADDR,pgdat+OFFSET(pglist_data.node_spanned_pages),
2205
&node_spanned_pages, sizeof node_spanned_pages)) {
2206
ERRMSG("Can't get node_spanned_pages.\n");
2209
pfn_end = pfn_start + node_spanned_pages;
2211
if (SYMBOL(vmem_map) == NOT_FOUND_SYMBOL) {
2212
if (!readmem(VADDR, pgdat + OFFSET(pglist_data.node_mem_map),
2213
&mem_map, sizeof mem_map)) {
2214
ERRMSG("Can't get mem_map.\n");
2218
mem_map = vmem_map + (SIZE(page) * pfn_start);
2222
* For some ia64 NUMA systems.
2223
* On some systems, a node has the separated memory.
2224
* And pglist_data(s) have the duplicated memory range
2227
* Nid: Physical address
2228
* 0 : 0x1000000000 - 0x2000000000
2229
* 1 : 0x2000000000 - 0x3000000000
2230
* 2 : 0x0000000000 - 0x6020000000 <- Overlapping
2231
* 3 : 0x3000000000 - 0x4000000000
2232
* 4 : 0x4000000000 - 0x5000000000
2233
* 5 : 0x5000000000 - 0x6000000000
2235
* Then, mem_map(s) should be separated by
2236
* node_memblk_s info.
2238
if (!separate_mem_map(&mmd[id_mm], &id_mm, node,
2239
mem_map, pfn_start)) {
2240
ERRMSG("Can't separate mem_map.\n");
2244
if (info->max_mapnr < pfn_end) {
2245
DEBUG_MSG("pfn_end of node (%d) is over max_mapnr.\n",
2247
DEBUG_MSG(" pfn_start: %lx\n", pfn_start);
2248
DEBUG_MSG(" pfn_end : %lx\n", pfn_end);
2249
DEBUG_MSG(" max_mapnr: %llx\n", info->max_mapnr);
2251
pfn_end = info->max_mapnr;
2255
* The number of mem_map is the same as the number
2258
mmd[id_mm].pfn_start = pfn_start;
2259
mmd[id_mm].pfn_end = pfn_end;
2260
mmd[id_mm].mem_map = mem_map;
2265
* Get pglist_data of the next node.
2267
if (i < (vt.numnodes - 1)) {
2268
if ((node = next_online_node(node + 1)) < 0) {
2269
ERRMSG("Can't get next online node.\n");
2271
} else if (!(pgdat = next_online_pgdat(node))) {
2272
ERRMSG("Can't determine pgdat list (node %d).\n",
2280
* Sort mem_map by pfn_start.
2282
for (i = 0; i < (num_mem_map - 1); i++) {
2283
for (j = i + 1; j < num_mem_map; j++) {
2284
if (mmd[j].pfn_start < mmd[i].pfn_start) {
2293
* Calculate the number of mem_map.
2295
info->num_mem_map = num_mem_map;
2296
if (mmd[0].pfn_start != 0)
2297
info->num_mem_map++;
2299
for (i = 0; i < num_mem_map - 1; i++) {
2300
if (mmd[i].pfn_end > mmd[i + 1].pfn_start) {
2301
ERRMSG("The mem_map is overlapped with the next one.\n");
2302
ERRMSG("mmd[%d].pfn_end = %llx\n", i, mmd[i].pfn_end);
2303
ERRMSG("mmd[%d].pfn_start = %llx\n", i + 1, mmd[i + 1].pfn_start);
2305
} else if (mmd[i].pfn_end == mmd[i + 1].pfn_start)
2307
* Continuous mem_map
2312
* Discontinuous mem_map
2314
info->num_mem_map++;
2316
if (mmd[num_mem_map - 1].pfn_end < info->max_mapnr)
2317
info->num_mem_map++;
2319
if ((info->mem_map_data = (struct mem_map_data *)
2320
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
2321
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
2327
* Create mem_map data.
2330
if (mmd[0].pfn_start != 0) {
2331
dump_mem_map(0, mmd[0].pfn_start, NOT_MEMMAP_ADDR, id_mm);
2334
for (i = 0; i < num_mem_map; i++) {
2335
dump_mem_map(mmd[i].pfn_start, mmd[i].pfn_end,
2336
mmd[i].mem_map, id_mm);
2338
if ((i < num_mem_map - 1)
2339
&& (mmd[i].pfn_end != mmd[i + 1].pfn_start)) {
2340
dump_mem_map(mmd[i].pfn_end, mmd[i +1].pfn_start,
2341
NOT_MEMMAP_ADDR, id_mm);
2345
i = num_mem_map - 1;
2346
if (is_xen_memory()) {
2347
if (mmd[i].pfn_end < info->dom0_mapnr)
2348
dump_mem_map(mmd[i].pfn_end, info->dom0_mapnr,
2349
NOT_MEMMAP_ADDR, id_mm);
2351
if (mmd[i].pfn_end < info->max_mapnr)
2352
dump_mem_map(mmd[i].pfn_end, info->max_mapnr,
2353
NOT_MEMMAP_ADDR, id_mm);
2359
nr_to_section(unsigned long nr, unsigned long *mem_sec)
2363
if (is_sparsemem_extreme())
2364
addr = mem_sec[SECTION_NR_TO_ROOT(nr)] +
2365
(nr & SECTION_ROOT_MASK()) * SIZE(mem_section);
2367
addr = SYMBOL(mem_section) + (nr * SIZE(mem_section));
2369
if (!is_kvaddr(addr))
2376
section_mem_map_addr(unsigned long addr)
2381
if (!is_kvaddr(addr))
2384
if ((mem_section = malloc(SIZE(mem_section))) == NULL) {
2385
ERRMSG("Can't allocate memory for a struct mem_section. %s\n",
2389
if (!readmem(VADDR, addr, mem_section, SIZE(mem_section))) {
2390
ERRMSG("Can't get a struct mem_section(%lx).\n", addr);
2394
map = ULONG(mem_section + OFFSET(mem_section.section_mem_map));
2395
map &= SECTION_MAP_MASK;
2402
sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long section_nr)
2404
if (!is_kvaddr(coded_mem_map))
2407
return coded_mem_map +
2408
(SECTION_NR_TO_PFN(section_nr) * SIZE(page));
2412
get_mm_sparsemem(void)
2414
unsigned int section_nr, mem_section_size, num_section;
2415
unsigned long long pfn_start, pfn_end;
2416
unsigned long section, mem_map;
2417
unsigned long *mem_sec = NULL;
2422
* Get the address of the symbol "mem_section".
2424
num_section = divideup(info->max_mapnr, PAGES_PER_SECTION());
2425
if (is_sparsemem_extreme()) {
2426
info->sections_per_root = _SECTIONS_PER_ROOT_EXTREME();
2427
mem_section_size = sizeof(void *) * NR_SECTION_ROOTS();
2429
info->sections_per_root = _SECTIONS_PER_ROOT();
2430
mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS();
2432
if ((mem_sec = malloc(mem_section_size)) == NULL) {
2433
ERRMSG("Can't allocate memory for the mem_section. %s\n",
2437
if (!readmem(VADDR, SYMBOL(mem_section), mem_sec,
2438
mem_section_size)) {
2439
ERRMSG("Can't get the address of mem_section.\n");
2442
info->num_mem_map = num_section;
2443
if ((info->mem_map_data = (struct mem_map_data *)
2444
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
2445
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
2449
for (section_nr = 0; section_nr < num_section; section_nr++) {
2450
section = nr_to_section(section_nr, mem_sec);
2451
mem_map = section_mem_map_addr(section);
2452
mem_map = sparse_decode_mem_map(mem_map, section_nr);
2453
if (!is_kvaddr(mem_map))
2454
mem_map = NOT_MEMMAP_ADDR;
2455
pfn_start = section_nr * PAGES_PER_SECTION();
2456
pfn_end = pfn_start + PAGES_PER_SECTION();
2457
if (info->max_mapnr < pfn_end)
2458
pfn_end = info->max_mapnr;
2459
dump_mem_map(pfn_start, pfn_end, mem_map, section_nr);
2463
if (mem_sec != NULL)
2470
get_mem_map_without_mm(void)
2472
info->num_mem_map = 1;
2473
if ((info->mem_map_data = (struct mem_map_data *)
2474
malloc(sizeof(struct mem_map_data)*info->num_mem_map)) == NULL) {
2475
ERRMSG("Can't allocate memory for the mem_map_data. %s\n",
2479
if (is_xen_memory())
2480
dump_mem_map(0, info->dom0_mapnr, NOT_MEMMAP_ADDR, 0);
2482
dump_mem_map(0, info->max_mapnr, NOT_MEMMAP_ADDR, 0);
2492
switch (get_mem_type()) {
2495
DEBUG_MSG("Memory type : SPARSEMEM\n");
2497
ret = get_mm_sparsemem();
2501
DEBUG_MSG("Memory type : SPARSEMEM_EX\n");
2503
ret = get_mm_sparsemem();
2507
DEBUG_MSG("Memory type : DISCONTIGMEM\n");
2509
ret = get_mm_discontigmem();
2513
DEBUG_MSG("Memory type : FLATMEM\n");
2515
ret = get_mm_flatmem();
2518
ERRMSG("Can't distinguish the memory type.\n");
2526
initialize_bitmap_memory(void)
2528
struct disk_dump_header *dh;
2529
struct dump_bitmap *bmp;
2530
off_t bitmap_offset;
2531
int bitmap_len, max_sect_len;
2536
dh = info->dh_memory;
2537
block_size = dh->block_size;
2540
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size) * block_size;
2541
bitmap_len = block_size * dh->bitmap_blocks;
2543
bmp = malloc(sizeof(struct dump_bitmap));
2545
ERRMSG("Can't allocate memory for the memory-bitmap. %s\n",
2549
bmp->fd = info->fd_memory;
2550
bmp->file_name = info->name_memory;
2552
memset(bmp->buf, 0, BUFSIZE_BITMAP);
2553
bmp->offset = bitmap_offset + bitmap_len / 2;
2554
info->bitmap_memory = bmp;
2556
max_sect_len = divideup(dh->max_mapnr, BITMAP_SECT_LEN);
2557
info->valid_pages = calloc(sizeof(ulong), max_sect_len);
2558
if (info->valid_pages == NULL) {
2559
ERRMSG("Can't allocate memory for the valid_pages. %s\n",
2564
for (i = 1, pfn = 0; i < max_sect_len; i++) {
2565
info->valid_pages[i] = info->valid_pages[i - 1];
2566
for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++)
2567
if (is_dumpable(info->bitmap_memory, pfn))
2568
info->valid_pages[i]++;
2579
int debug_info = FALSE;
2581
if (is_xen_memory() && !initial_xen())
2585
if (lzo_init() == LZO_E_OK)
2586
info->flag_lzo_support = TRUE;
2588
if (info->flag_compress == DUMP_DH_COMPRESSED_LZO) {
2589
MSG("'-l' option is disabled, ");
2590
MSG("because this binary doesn't support lzo compression.\n");
2591
MSG("Try `make USELZO=on` when building.\n");
2596
if (info->flag_compress == DUMP_DH_COMPRESSED_SNAPPY) {
2597
MSG("'-p' option is disabled, ");
2598
MSG("because this binary doesn't support snappy "
2600
MSG("Try `make USESNAPPY=on` when building.\n");
2604
if (info->flag_exclude_xen_dom) {
2605
if(info->flag_cyclic) {
2606
info->flag_cyclic = FALSE;
2607
MSG("Switched running mode from cyclic to non-cyclic,\n");
2608
MSG("because the cyclic mode doesn't support Xen.\n");
2611
if (!is_xen_memory()) {
2612
MSG("'-X' option is disable,");
2613
MSG("because %s is not Xen's memory core image.\n", info->name_memory);
2614
MSG("Commandline parameter is invalid.\n");
2615
MSG("Try `makedumpfile --help' for more information.\n");
2620
if (info->flag_refiltering) {
2621
if (info->flag_elf_dumpfile) {
2622
MSG("'-E' option is disable, ");
2623
MSG("because %s is kdump compressed format.\n",
2628
if(info->flag_cyclic) {
2629
info->flag_cyclic = FALSE;
2630
MSG("Switched running mode from cyclic to non-cyclic,\n");
2631
MSG("because the cyclic mode doesn't support refiltering\n");
2632
MSG("kdump compressed format.\n");
2635
info->phys_base = info->kh_memory->phys_base;
2636
info->max_dump_level |= info->kh_memory->dump_level;
2638
if (!initialize_bitmap_memory())
2641
} else if (info->flag_sadump) {
2642
if (info->flag_elf_dumpfile) {
2643
MSG("'-E' option is disable, ");
2644
MSG("because %s is sadump %s format.\n",
2645
info->name_memory, sadump_format_type_name());
2649
if(info->flag_cyclic) {
2650
info->flag_cyclic = FALSE;
2651
MSG("Switched running mode from cyclic to non-cyclic,\n");
2652
MSG("because the cyclic mode doesn't support sadump format.\n");
2655
set_page_size(sadump_page_size());
2657
if (!sadump_initialize_bitmap_memory())
2660
(void) sadump_set_timestamp(&info->timestamp);
2663
* NOTE: phys_base is never saved by sadump and so
2664
* must be computed in some way. We here choose the
2665
* way of looking at linux_banner. See
2666
* sadump_virt_phys_base(). The processing is
2667
* postponed until debug information becomes
2671
} else if (!get_phys_base())
2675
* Get the debug information for analysis from the vmcoreinfo file
2677
if (info->flag_read_vmcoreinfo) {
2678
if (!read_vmcoreinfo())
2683
* Get the debug information for analysis from the kernel file
2685
} else if (info->name_vmlinux) {
2686
set_dwarf_debuginfo("vmlinux", NULL,
2687
info->name_vmlinux, info->fd_vmlinux);
2689
if (!get_symbol_info())
2692
if (!get_structure_info())
2695
if (!get_srcfile_info())
2701
* Check whether /proc/vmcore contains vmcoreinfo,
2702
* and get both the offset and the size.
2704
if (!has_vmcoreinfo()) {
2705
if (info->max_dump_level <= DL_EXCLUDE_ZERO)
2708
MSG("%s doesn't contain vmcoreinfo.\n",
2710
MSG("Specify '-x' option or '-i' option.\n");
2711
MSG("Commandline parameter is invalid.\n");
2712
MSG("Try `makedumpfile --help' for more information.\n");
2718
* Get the debug information from /proc/vmcore.
2719
* NOTE: Don't move this code to the above, because the debugging
2720
* information token by -x/-i option is overwritten by vmcoreinfo
2721
* in /proc/vmcore. vmcoreinfo in /proc/vmcore is more reliable
2722
* than -x/-i option.
2724
if (has_vmcoreinfo()) {
2725
get_vmcoreinfo(&offset, &size);
2726
if (!read_vmcoreinfo_from_vmcore(offset, size, FALSE))
2732
if (!info->page_size) {
2734
* If we cannot get page_size from a vmcoreinfo file,
2735
* fall back to the current kernel page size.
2737
if (!fallback_to_current_page_size())
2740
if (!get_max_mapnr())
2743
if (info->flag_cyclic) {
2744
if (info->bufsize_cyclic == 0) {
2745
if (!calculate_cyclic_buffer_size())
2748
unsigned long long free_memory;
2751
* The buffer size is specified as Kbyte with
2752
* --cyclic-buffer <size> option.
2754
info->bufsize_cyclic <<= 10;
2757
* Truncate the buffer size to free memory size.
2759
free_memory = get_free_memory_size();
2760
if (info->bufsize_cyclic > free_memory) {
2761
MSG("Specified buffer size is larger than free memory.\n");
2762
MSG("The buffer size for the cyclic mode will ");
2763
MSG("be truncated to %lld byte.\n", free_memory);
2765
* bufsize_cyclic is used to allocate 1st and 2nd bitmap,
2766
* so it should be truncated to the half of free_memory.
2768
info->bufsize_cyclic = free_memory / 2;
2772
info->pfn_cyclic = info->bufsize_cyclic * BITPERBYTE;
2775
DEBUG_MSG("Buffer size for the cyclic mode: %ld\n", info->bufsize_cyclic);
2779
if (info->flag_sadump)
2780
(void) sadump_virt_phys_base();
2782
if (!get_machdep_info())
2785
if (info->flag_sadump) {
2788
online_cpus = sadump_num_online_cpus();
2792
set_nr_cpus(online_cpus);
2795
if (!check_release())
2798
if (!get_versiondep_info())
2802
* NOTE: This must be done before refering to
2803
* VMALLOC'ed memory. The first 640kB contains data
2804
* necessary for paging, like PTE. The absence of the
2805
* region affects reading VMALLOC'ed memory such as
2808
if (info->flag_sadump)
2809
sadump_kdump_backup_region_init();
2811
if (!get_numnodes())
2817
if (!info->flag_dmesg && info->flag_sadump &&
2818
sadump_check_debug_info() &&
2819
!sadump_generate_elf_note_from_dumpfile())
2822
if (info->flag_cyclic && info->dump_level & DL_EXCLUDE_FREE)
2823
check_cyclic_buffer_overrun();
2826
if (!get_mem_map_without_mm())
2830
if (is_xen_memory() && !get_dom0_mapnr())
2833
if (!get_value_for_old_linux())
2836
if (info->flag_cyclic && (info->dump_level & DL_EXCLUDE_FREE))
2837
setup_page_is_buddy();
2843
initialize_bitmap(struct dump_bitmap *bitmap)
2845
bitmap->fd = info->fd_bitmap;
2846
bitmap->file_name = info->name_bitmap;
2847
bitmap->no_block = -1;
2848
memset(bitmap->buf, 0, BUFSIZE_BITMAP);
2852
initialize_bitmap_cyclic(char *bitmap)
2854
memset(bitmap, 0, info->bufsize_cyclic);
2858
initialize_1st_bitmap(struct dump_bitmap *bitmap)
2860
initialize_bitmap(bitmap);
2865
initialize_2nd_bitmap(struct dump_bitmap *bitmap)
2867
initialize_bitmap(bitmap);
2868
bitmap->offset = info->len_bitmap / 2;
2872
set_bitmap(struct dump_bitmap *bitmap, unsigned long long pfn,
2876
off_t old_offset, new_offset;
2877
old_offset = bitmap->offset + BUFSIZE_BITMAP * bitmap->no_block;
2878
new_offset = bitmap->offset + BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);
2880
if (0 <= bitmap->no_block && old_offset != new_offset) {
2881
if (lseek(bitmap->fd, old_offset, SEEK_SET) < 0 ) {
2882
ERRMSG("Can't seek the bitmap(%s). %s\n",
2883
bitmap->file_name, strerror(errno));
2886
if (write(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP)
2887
!= BUFSIZE_BITMAP) {
2888
ERRMSG("Can't write the bitmap(%s). %s\n",
2889
bitmap->file_name, strerror(errno));
2893
if (old_offset != new_offset) {
2894
if (lseek(bitmap->fd, new_offset, SEEK_SET) < 0 ) {
2895
ERRMSG("Can't seek the bitmap(%s). %s\n",
2896
bitmap->file_name, strerror(errno));
2899
if (read(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP)
2900
!= BUFSIZE_BITMAP) {
2901
ERRMSG("Can't read the bitmap(%s). %s\n",
2902
bitmap->file_name, strerror(errno));
2905
bitmap->no_block = pfn / PFN_BUFBITMAP;
2908
* If val is 0, clear bit on the bitmap.
2910
byte = (pfn%PFN_BUFBITMAP)>>3;
2911
bit = (pfn%PFN_BUFBITMAP) & 7;
2913
bitmap->buf[byte] |= 1<<bit;
2915
bitmap->buf[byte] &= ~(1<<bit);
2921
set_bitmap_cyclic(char *bitmap, unsigned long long pfn, int val)
2925
if (pfn < info->cyclic_start_pfn || info->cyclic_end_pfn <= pfn)
2929
* If val is 0, clear bit on the bitmap.
2931
byte = (pfn - info->cyclic_start_pfn)>>3;
2932
bit = (pfn - info->cyclic_start_pfn) & 7;
2934
bitmap[byte] |= 1<<bit;
2936
bitmap[byte] &= ~(1<<bit);
2942
sync_bitmap(struct dump_bitmap *bitmap)
2945
offset = bitmap->offset + BUFSIZE_BITMAP * bitmap->no_block;
2948
* The bitmap buffer is not dirty, and it is not necessary
2951
if (bitmap->no_block < 0)
2954
if (lseek(bitmap->fd, offset, SEEK_SET) < 0 ) {
2955
ERRMSG("Can't seek the bitmap(%s). %s\n",
2956
bitmap->file_name, strerror(errno));
2959
if (write(bitmap->fd, bitmap->buf, BUFSIZE_BITMAP)
2960
!= BUFSIZE_BITMAP) {
2961
ERRMSG("Can't write the bitmap(%s). %s\n",
2962
bitmap->file_name, strerror(errno));
2969
sync_1st_bitmap(void)
2971
return sync_bitmap(info->bitmap1);
2975
sync_2nd_bitmap(void)
2977
return sync_bitmap(info->bitmap2);
2981
set_bit_on_1st_bitmap(unsigned long long pfn)
2983
if (info->flag_cyclic) {
2984
return set_bitmap_cyclic(info->partial_bitmap1, pfn, 1);
2986
return set_bitmap(info->bitmap1, pfn, 1);
2991
clear_bit_on_1st_bitmap(unsigned long long pfn)
2993
if (info->flag_cyclic) {
2994
return set_bitmap_cyclic(info->partial_bitmap1, pfn, 0);
2996
return set_bitmap(info->bitmap1, pfn, 0);
3001
clear_bit_on_2nd_bitmap(unsigned long long pfn)
3003
if (info->flag_cyclic) {
3004
return set_bitmap_cyclic(info->partial_bitmap2, pfn, 0);
3006
return set_bitmap(info->bitmap2, pfn, 0);
3011
clear_bit_on_2nd_bitmap_for_kernel(unsigned long long pfn)
3013
unsigned long long maddr;
3015
if (is_xen_memory()) {
3016
maddr = ptom_xen(pfn_to_paddr(pfn));
3017
if (maddr == NOT_PADDR) {
3018
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
3022
pfn = paddr_to_pfn(maddr);
3024
return clear_bit_on_2nd_bitmap(pfn);
3028
is_in_segs(unsigned long long paddr)
3030
if (info->flag_refiltering) {
3031
static struct dump_bitmap bitmap1 = {0};
3033
if (bitmap1.fd == 0)
3034
initialize_1st_bitmap(&bitmap1);
3036
return is_dumpable(&bitmap1, paddr_to_pfn(paddr));
3039
if (paddr_to_offset(paddr))
3046
read_cache(struct cache_data *cd)
3048
const off_t failed = (off_t)-1;
3050
if (lseek(cd->fd, cd->offset, SEEK_SET) == failed) {
3051
ERRMSG("Can't seek the dump file(%s). %s\n",
3052
cd->file_name, strerror(errno));
3055
if (read(cd->fd, cd->buf, cd->cache_size) != cd->cache_size) {
3056
ERRMSG("Can't read the dump file(%s). %s\n",
3057
cd->file_name, strerror(errno));
3060
cd->offset += cd->cache_size;
3069
if (*(char *)&i == 0x12)
3076
write_and_check_space(int fd, void *buf, size_t buf_size, char *file_name)
3078
int status, written_size = 0;
3080
while (written_size < buf_size) {
3081
status = write(fd, buf + written_size,
3082
buf_size - written_size);
3084
written_size += status;
3087
if (errno == ENOSPC)
3088
info->flag_nospace = TRUE;
3089
MSG("\nCan't write the dump file(%s). %s\n",
3090
file_name, strerror(errno));
3097
write_buffer(int fd, off_t offset, void *buf, size_t buf_size, char *file_name)
3099
struct makedumpfile_data_header fdh;
3100
const off_t failed = (off_t)-1;
3102
if (fd == STDOUT_FILENO) {
3104
* Output a header of flattened format instead of
3105
* lseek(). For sending dump data to a different
3106
* architecture, change the values to big endian.
3108
if (is_bigendian()){
3109
fdh.offset = offset;
3110
fdh.buf_size = buf_size;
3112
fdh.offset = bswap_64(offset);
3113
fdh.buf_size = bswap_64(buf_size);
3115
if (!write_and_check_space(fd, &fdh, sizeof(fdh), file_name))
3118
if (lseek(fd, offset, SEEK_SET) == failed) {
3119
ERRMSG("Can't seek the dump file(%s). %s\n",
3120
file_name, strerror(errno));
3124
if (!write_and_check_space(fd, buf, buf_size, file_name))
3131
write_cache(struct cache_data *cd, void *buf, size_t size)
3133
memcpy(cd->buf + cd->buf_size, buf, size);
3134
cd->buf_size += size;
3136
if (cd->buf_size < cd->cache_size)
3139
if (!write_buffer(cd->fd, cd->offset, cd->buf, cd->cache_size,
3143
cd->buf_size -= cd->cache_size;
3144
memcpy(cd->buf, cd->buf + cd->cache_size, cd->buf_size);
3145
cd->offset += cd->cache_size;
3150
write_cache_bufsz(struct cache_data *cd)
3155
if (!write_buffer(cd->fd, cd->offset, cd->buf, cd->buf_size,
3159
cd->offset += cd->buf_size;
3165
write_cache_zero(struct cache_data *cd, size_t size)
3167
if (!write_cache_bufsz(cd))
3170
memset(cd->buf + cd->buf_size, 0, size);
3171
cd->buf_size += size;
3173
return write_cache_bufsz(cd);
3177
read_buf_from_stdin(void *buf, int buf_size)
3179
int read_size = 0, tmp_read_size = 0;
3180
time_t last_time, tm;
3182
last_time = time(NULL);
3184
while (read_size != buf_size) {
3186
tmp_read_size = read(STDIN_FILENO, buf + read_size,
3187
buf_size - read_size);
3189
if (tmp_read_size < 0) {
3190
ERRMSG("Can't read STDIN. %s\n", strerror(errno));
3193
} else if (0 == tmp_read_size) {
3195
* If it cannot get any data from a standard input
3196
* for a long time, break this loop.
3199
if (TIMEOUT_STDIN < (tm - last_time)) {
3200
ERRMSG("Can't get any data from STDIN.\n");
3204
read_size += tmp_read_size;
3205
last_time = time(NULL);
3212
read_start_flat_header(void)
3214
char buf[MAX_SIZE_MDF_HEADER];
3215
struct makedumpfile_header fh;
3220
if (!read_buf_from_stdin(buf, MAX_SIZE_MDF_HEADER)) {
3221
ERRMSG("Can't get header of flattened format.\n");
3224
memcpy(&fh, buf, sizeof(fh));
3226
if (!is_bigendian()){
3227
fh.type = bswap_64(fh.type);
3228
fh.version = bswap_64(fh.version);
3232
* Check flat header.
3234
if (strcmp(fh.signature, MAKEDUMPFILE_SIGNATURE)) {
3235
ERRMSG("Can't get signature of flattened format.\n");
3238
if (fh.type != TYPE_FLAT_HEADER) {
3239
ERRMSG("Can't get type of flattened format.\n");
3247
read_flat_data_header(struct makedumpfile_data_header *fdh)
3249
if (!read_buf_from_stdin(fdh,
3250
sizeof(struct makedumpfile_data_header))) {
3251
ERRMSG("Can't get header of flattened format.\n");
3254
if (!is_bigendian()){
3255
fdh->offset = bswap_64(fdh->offset);
3256
fdh->buf_size = bswap_64(fdh->buf_size);
3262
rearrange_dumpdata(void)
3264
int read_size, tmp_read_size;
3265
char buf[SIZE_BUF_STDIN];
3266
struct makedumpfile_data_header fdh;
3271
if (!read_start_flat_header()) {
3272
ERRMSG("Can't get header of flattened format.\n");
3277
* Read the first data header.
3279
if (!read_flat_data_header(&fdh)) {
3280
ERRMSG("Can't get header of flattened format.\n");
3286
while (read_size < fdh.buf_size) {
3287
if (sizeof(buf) < (fdh.buf_size - read_size))
3288
tmp_read_size = sizeof(buf);
3290
tmp_read_size = fdh.buf_size - read_size;
3292
if (!read_buf_from_stdin(buf, tmp_read_size)) {
3293
ERRMSG("Can't get data of flattened format.\n");
3296
if (!write_buffer(info->fd_dumpfile,
3297
fdh.offset + read_size, buf, tmp_read_size,
3298
info->name_dumpfile))
3301
read_size += tmp_read_size;
3304
* Read the next header.
3306
if (!read_flat_data_header(&fdh)) {
3307
ERRMSG("Can't get data header of flattened format.\n");
3311
} while ((0 <= fdh.offset) && (0 < fdh.buf_size));
3313
if ((fdh.offset != END_FLAG_FLAT_HEADER)
3314
|| (fdh.buf_size != END_FLAG_FLAT_HEADER)) {
3315
ERRMSG("Can't get valid end header of flattened format.\n");
3323
page_to_pfn(unsigned long page)
3326
unsigned long long pfn = ULONGLONG_MAX;
3327
unsigned long long index = 0;
3328
struct mem_map_data *mmd;
3330
mmd = info->mem_map_data;
3331
for (num = 0; num < info->num_mem_map; num++, mmd++) {
3332
if (mmd->mem_map == NOT_MEMMAP_ADDR)
3334
if (page < mmd->mem_map)
3336
index = (page - mmd->mem_map) / SIZE(page);
3337
if (index >= mmd->pfn_end - mmd->pfn_start)
3339
pfn = mmd->pfn_start + index;
3342
if (pfn == ULONGLONG_MAX) {
3343
ERRMSG("Can't convert the address of page descriptor (%lx) to pfn.\n", page);
3344
return ULONGLONG_MAX;
3350
reset_bitmap_of_free_pages(unsigned long node_zones)
3353
int order, i, migrate_type, migrate_types;
3354
unsigned long curr, previous, head, curr_page, curr_prev;
3355
unsigned long addr_free_pages, free_pages = 0, found_free_pages = 0;
3356
unsigned long long pfn, start_pfn;
3359
* On linux-2.6.24 or later, free_list is divided into the array.
3361
migrate_types = ARRAY_LENGTH(free_area.free_list);
3362
if (migrate_types == NOT_FOUND_STRUCTURE)
3365
for (order = (ARRAY_LENGTH(zone.free_area) - 1); order >= 0; --order) {
3366
for (migrate_type = 0; migrate_type < migrate_types;
3368
head = node_zones + OFFSET(zone.free_area)
3369
+ SIZE(free_area) * order
3370
+ OFFSET(free_area.free_list)
3371
+ SIZE(list_head) * migrate_type;
3373
if (!readmem(VADDR, head + OFFSET(list_head.next),
3374
&curr, sizeof curr)) {
3375
ERRMSG("Can't get next list_head.\n");
3378
for (;curr != head;) {
3379
curr_page = curr - OFFSET(page.lru);
3380
start_pfn = page_to_pfn(curr_page);
3381
if (start_pfn == ULONGLONG_MAX)
3384
if (!readmem(VADDR, curr+OFFSET(list_head.prev),
3385
&curr_prev, sizeof curr_prev)) {
3386
ERRMSG("Can't get prev list_head.\n");
3389
if (previous != curr_prev) {
3390
ERRMSG("The free list is broken.\n");
3391
retcd = ANALYSIS_FAILED;
3394
for (i = 0; i < (1<<order); i++) {
3395
pfn = start_pfn + i;
3396
if (clear_bit_on_2nd_bitmap_for_kernel(pfn))
3401
if (!readmem(VADDR, curr+OFFSET(list_head.next),
3402
&curr, sizeof curr)) {
3403
ERRMSG("Can't get next list_head.\n");
3411
* Check the number of free pages.
3413
if (OFFSET(zone.free_pages) != NOT_FOUND_STRUCTURE) {
3414
addr_free_pages = node_zones + OFFSET(zone.free_pages);
3416
} else if (OFFSET(zone.vm_stat) != NOT_FOUND_STRUCTURE) {
3418
* On linux-2.6.21 or later, the number of free_pages is
3419
* in vm_stat[NR_FREE_PAGES].
3421
addr_free_pages = node_zones + OFFSET(zone.vm_stat)
3422
+ sizeof(long) * NUMBER(NR_FREE_PAGES);
3425
ERRMSG("Can't get addr_free_pages.\n");
3428
if (!readmem(VADDR, addr_free_pages, &free_pages, sizeof free_pages)) {
3429
ERRMSG("Can't get free_pages.\n");
3432
if (free_pages != found_free_pages && !info->flag_cyclic) {
3434
* On linux-2.6.21 or later, the number of free_pages is
3435
* sometimes different from the one of the list "free_area",
3436
* because the former is flushed asynchronously.
3438
DEBUG_MSG("The number of free_pages is invalid.\n");
3439
DEBUG_MSG(" free_pages = %ld\n", free_pages);
3440
DEBUG_MSG(" found_free_pages = %ld\n", found_free_pages);
3442
pfn_free += found_free_pages;
3450
int log_buf_len, length_log, length_oldlog, ret = FALSE;
3451
unsigned long log_buf, log_end, index;
3452
unsigned long log_end_2_6_24;
3453
unsigned log_end_2_6_25;
3454
char *log_buffer = NULL;
3457
* log_end has been changed to "unsigned" since linux-2.6.25.
3458
* 2.6.24 or former: static unsigned long log_end;
3459
* 2.6.25 or later : static unsigned log_end;
3461
if (!open_files_for_creating_dumpfile())
3464
if (!info->flag_refiltering && !info->flag_sadump) {
3465
if (!get_elf_info(info->fd_memory, info->name_memory))
3471
if ((SYMBOL(log_buf) == NOT_FOUND_SYMBOL)
3472
|| (SYMBOL(log_buf_len) == NOT_FOUND_SYMBOL)
3473
|| (SYMBOL(log_end) == NOT_FOUND_SYMBOL)) {
3474
ERRMSG("Can't find some symbols for log_buf.\n");
3477
if (!readmem(VADDR, SYMBOL(log_buf), &log_buf, sizeof(log_buf))) {
3478
ERRMSG("Can't get log_buf.\n");
3481
if (info->kernel_version >= KERNEL_VERSION(2, 6, 25)) {
3482
if (!readmem(VADDR, SYMBOL(log_end), &log_end_2_6_25,
3483
sizeof(log_end_2_6_25))) {
3484
ERRMSG("Can't to get log_end.\n");
3487
log_end = log_end_2_6_25;
3489
if (!readmem(VADDR, SYMBOL(log_end), &log_end_2_6_24,
3490
sizeof(log_end_2_6_24))) {
3491
ERRMSG("Can't to get log_end.\n");
3494
log_end = log_end_2_6_24;
3496
if (!readmem(VADDR, SYMBOL(log_buf_len), &log_buf_len,
3497
sizeof(log_buf_len))) {
3498
ERRMSG("Can't get log_buf_len.\n");
3502
DEBUG_MSG("log_buf : %lx\n", log_buf);
3503
DEBUG_MSG("log_end : %lx\n", log_end);
3504
DEBUG_MSG("log_buf_len : %d\n", log_buf_len);
3506
if ((log_buffer = malloc(log_buf_len)) == NULL) {
3507
ERRMSG("Can't allocate memory for log_buf. %s\n",
3512
if (log_end < log_buf_len) {
3513
length_log = log_end;
3514
if(!readmem(VADDR, log_buf, log_buffer, length_log)) {
3515
ERRMSG("Can't read dmesg log.\n");
3519
index = log_end & (log_buf_len - 1);
3520
DEBUG_MSG("index : %lx\n", index);
3521
length_log = log_buf_len;
3522
length_oldlog = log_buf_len - index;
3523
if(!readmem(VADDR, log_buf + index, log_buffer, length_oldlog)) {
3524
ERRMSG("Can't read old dmesg log.\n");
3527
if(!readmem(VADDR, log_buf, log_buffer + length_oldlog, index)) {
3528
ERRMSG("Can't read new dmesg log.\n");
3532
DEBUG_MSG("length_log : %d\n", length_log);
3534
if (!open_dump_file()) {
3535
ERRMSG("Can't open output file.\n");
3538
if (write(info->fd_dumpfile, log_buffer, length_log) < 0)
3541
if (!close_files_for_creating_dumpfile())
3554
_exclude_free_page(void)
3556
int i, nr_zones, num_nodes, node;
3557
unsigned long node_zones, zone, spanned_pages, pgdat;
3558
struct timeval tv_start;
3560
if ((node = next_online_node(0)) < 0) {
3561
ERRMSG("Can't get next online node.\n");
3564
if (!(pgdat = next_online_pgdat(node))) {
3565
ERRMSG("Can't get pgdat list.\n");
3568
gettimeofday(&tv_start, NULL);
3570
for (num_nodes = 1; num_nodes <= vt.numnodes; num_nodes++) {
3572
print_progress(PROGRESS_FREE_PAGES, num_nodes - 1, vt.numnodes);
3574
node_zones = pgdat + OFFSET(pglist_data.node_zones);
3576
if (!readmem(VADDR, pgdat + OFFSET(pglist_data.nr_zones),
3577
&nr_zones, sizeof(nr_zones))) {
3578
ERRMSG("Can't get nr_zones.\n");
3582
for (i = 0; i < nr_zones; i++) {
3584
print_progress(PROGRESS_FREE_PAGES, i + nr_zones * (num_nodes - 1),
3585
nr_zones * vt.numnodes);
3587
zone = node_zones + (i * SIZE(zone));
3588
if (!readmem(VADDR, zone + OFFSET(zone.spanned_pages),
3589
&spanned_pages, sizeof spanned_pages)) {
3590
ERRMSG("Can't get spanned_pages.\n");
3595
if (!reset_bitmap_of_free_pages(zone))
3598
if (num_nodes < vt.numnodes) {
3599
if ((node = next_online_node(node + 1)) < 0) {
3600
ERRMSG("Can't get next online node.\n");
3602
} else if (!(pgdat = next_online_pgdat(node))) {
3603
ERRMSG("Can't determine pgdat list (node %d).\n",
3613
print_progress(PROGRESS_FREE_PAGES, vt.numnodes, vt.numnodes);
3614
print_execution_time(PROGRESS_FREE_PAGES, &tv_start);
3620
exclude_free_page(void)
3623
* Check having necessary information.
3625
if ((SYMBOL(node_data) == NOT_FOUND_SYMBOL)
3626
&& (SYMBOL(pgdat_list) == NOT_FOUND_SYMBOL)
3627
&& (SYMBOL(contig_page_data) == NOT_FOUND_SYMBOL)) {
3628
ERRMSG("Can't get necessary symbols for excluding free pages.\n");
3631
if ((SIZE(zone) == NOT_FOUND_STRUCTURE)
3632
|| ((OFFSET(zone.free_pages) == NOT_FOUND_STRUCTURE)
3633
&& (OFFSET(zone.vm_stat) == NOT_FOUND_STRUCTURE))
3634
|| (OFFSET(zone.free_area) == NOT_FOUND_STRUCTURE)
3635
|| (OFFSET(zone.spanned_pages) == NOT_FOUND_STRUCTURE)
3636
|| (OFFSET(pglist_data.node_zones) == NOT_FOUND_STRUCTURE)
3637
|| (OFFSET(pglist_data.nr_zones) == NOT_FOUND_STRUCTURE)
3638
|| (SIZE(free_area) == NOT_FOUND_STRUCTURE)
3639
|| (OFFSET(free_area.free_list) == NOT_FOUND_STRUCTURE)
3640
|| (OFFSET(list_head.next) == NOT_FOUND_STRUCTURE)
3641
|| (OFFSET(list_head.prev) == NOT_FOUND_STRUCTURE)
3642
|| (OFFSET(page.lru) == NOT_FOUND_STRUCTURE)
3643
|| (ARRAY_LENGTH(zone.free_area) == NOT_FOUND_STRUCTURE)) {
3644
ERRMSG("Can't get necessary structures for excluding free pages.\n");
3647
if (is_xen_memory() && !info->dom0_mapnr) {
3648
ERRMSG("Can't get max domain-0 PFN for excluding free pages.\n");
3653
* Detect free pages and update 2nd-bitmap.
3655
if (!_exclude_free_page())
3662
* Let C be a cyclic buffer size and B a bitmap size used for
3663
* representing maximum block size managed by buddy allocator.
3665
* For some combinations of C and B, clearing operation can overrun
3666
* the cyclic buffer. Let's consider three cases.
3668
* - If C == B, this is trivially safe.
3670
* - If B > C, overrun can easily happen.
3672
* - In case of C > B, if C mod B != 0, then there exist n > m > 0,
3673
* B > b > 0 such that n x C = m x B + b. This means that clearing
3674
* operation overruns cyclic buffer (B - b)-bytes in the
3675
* combination of n-th cycle and m-th block.
3677
* Note that C mod B != 0 iff (m x C) mod B != 0 for some m.
3679
* If C == B, C mod B == 0 always holds. Again, if B > C, C mod B != 0
3680
* always holds. Hence, it's always sufficient to check the condition
3681
* C mod B != 0 in order to determine whether overrun can happen or
3684
* The bitmap size used for maximum block size B is calculated from
3687
* B := DIVIDE_UP((1 << (MAX_ORDER - 1)), BITS_PER_BYTE)
3689
* Normally, MAX_ORDER is 11 at default. This is configurable through
3690
* CONFIG_FORCE_MAX_ZONEORDER.
3693
check_cyclic_buffer_overrun(void)
3695
int max_order = ARRAY_LENGTH(zone.free_area);
3696
int max_order_nr_pages = 1 << (max_order - 1);
3697
unsigned long max_block_size = roundup(max_order_nr_pages, BITPERBYTE);
3699
if (info->bufsize_cyclic %
3700
roundup(max_order_nr_pages, BITPERBYTE)) {
3701
unsigned long bufsize;
3703
if (max_block_size > info->bufsize_cyclic) {
3704
MSG("WARNING: some free pages are not filtered.\n");
3708
bufsize = info->bufsize_cyclic;
3709
info->bufsize_cyclic = round(bufsize, max_block_size);
3711
MSG("cyclic buffer size has been changed: %lu => %lu\n",
3712
bufsize, info->bufsize_cyclic);
3717
* For the kernel versions from v2.6.17 to v2.6.37.
3720
page_is_buddy_v2(unsigned long flags, unsigned int _mapcount,
3721
unsigned long private, unsigned int _count)
3723
if (flags & (1UL << NUMBER(PG_buddy)))
3730
* For v2.6.38 and later kernel versions.
3733
page_is_buddy_v3(unsigned long flags, unsigned int _mapcount,
3734
unsigned long private, unsigned int _count)
3736
if (flags & (1UL << NUMBER(PG_slab)))
3739
if (_mapcount == (int)NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE))
3746
setup_page_is_buddy(void)
3748
if (OFFSET(page.private) == NOT_FOUND_STRUCTURE)
3751
if (NUMBER(PG_buddy) == NOT_FOUND_NUMBER) {
3752
if (NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE) != NOT_FOUND_NUMBER) {
3753
if (OFFSET(page._mapcount) != NOT_FOUND_STRUCTURE)
3754
info->page_is_buddy = page_is_buddy_v3;
3757
info->page_is_buddy = page_is_buddy_v2;
3760
if (!info->page_is_buddy)
3761
DEBUG_MSG("Can't select page_is_buddy handler; "
3762
"follow free lists instead of mem_map array.\n");
3766
* If using a dumpfile in kdump-compressed format as a source file
3767
* instead of /proc/vmcore, 1st-bitmap of a new dumpfile must be
3768
* the same as the one of a source file.
3771
copy_1st_bitmap_from_memory(void)
3773
char buf[info->dh_memory->block_size];
3775
off_t bitmap_offset;
3776
struct disk_dump_header *dh = info->dh_memory;
3778
bitmap_offset = (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size)
3781
if (lseek(info->fd_memory, bitmap_offset, SEEK_SET) < 0) {
3782
ERRMSG("Can't seek %s. %s\n",
3783
info->name_memory, strerror(errno));
3786
if (lseek(info->bitmap1->fd, info->bitmap1->offset, SEEK_SET) < 0) {
3787
ERRMSG("Can't seek the bitmap(%s). %s\n",
3788
info->bitmap1->file_name, strerror(errno));
3792
while (offset_page < (info->len_bitmap / 2)) {
3793
if (read(info->fd_memory, buf, sizeof(buf)) != sizeof(buf)) {
3794
ERRMSG("Can't read %s. %s\n",
3795
info->name_memory, strerror(errno));
3798
if (write(info->bitmap1->fd, buf, sizeof(buf)) != sizeof(buf)) {
3799
ERRMSG("Can't write the bitmap(%s). %s\n",
3800
info->bitmap1->file_name, strerror(errno));
3803
offset_page += sizeof(buf);
3809
create_1st_bitmap(void)
3812
unsigned int num_pt_loads = get_num_pt_loads();
3813
char buf[info->page_size];
3814
unsigned long long pfn, pfn_start, pfn_end, pfn_bitmap1;
3815
unsigned long long phys_start, phys_end;
3816
struct timeval tv_start;
3819
if (info->flag_refiltering)
3820
return copy_1st_bitmap_from_memory();
3822
if (info->flag_sadump)
3823
return sadump_copy_1st_bitmap_from_memory();
3826
* At first, clear all the bits on the 1st-bitmap.
3828
memset(buf, 0, sizeof(buf));
3830
if (lseek(info->bitmap1->fd, info->bitmap1->offset, SEEK_SET) < 0) {
3831
ERRMSG("Can't seek the bitmap(%s). %s\n",
3832
info->bitmap1->file_name, strerror(errno));
3836
while (offset_page < (info->len_bitmap / 2)) {
3837
if (write(info->bitmap1->fd, buf, info->page_size)
3838
!= info->page_size) {
3839
ERRMSG("Can't write the bitmap(%s). %s\n",
3840
info->bitmap1->file_name, strerror(errno));
3843
offset_page += info->page_size;
3846
gettimeofday(&tv_start, NULL);
3849
* If page is on memory hole, set bit on the 1st-bitmap.
3852
for (i = 0; get_pt_load(i, &phys_start, &phys_end, NULL, NULL); i++) {
3854
print_progress(PROGRESS_HOLES, i, num_pt_loads);
3856
pfn_start = paddr_to_pfn(phys_start);
3857
pfn_end = paddr_to_pfn(phys_end);
3859
if (!is_in_segs(pfn_to_paddr(pfn_start)))
3861
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
3862
set_bit_on_1st_bitmap(pfn);
3866
pfn_memhole = info->max_mapnr - pfn_bitmap1;
3871
print_progress(PROGRESS_HOLES, info->max_mapnr, info->max_mapnr);
3872
print_execution_time(PROGRESS_HOLES, &tv_start);
3874
if (!sync_1st_bitmap())
3881
create_1st_bitmap_cyclic()
3884
unsigned long long pfn, pfn_bitmap1;
3885
unsigned long long phys_start, phys_end;
3886
unsigned long long pfn_start, pfn_end;
3889
* At first, clear all the bits on the 1st-bitmap.
3891
initialize_bitmap_cyclic(info->partial_bitmap1);
3894
* If page is on memory hole, set bit on the 1st-bitmap.
3897
for (i = 0; get_pt_load(i, &phys_start, &phys_end, NULL, NULL); i++) {
3898
pfn_start = paddr_to_pfn(phys_start);
3899
pfn_end = paddr_to_pfn(phys_end);
3901
if (!is_in_segs(pfn_to_paddr(pfn_start)))
3903
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
3904
if (set_bit_on_1st_bitmap(pfn))
3908
pfn_memhole -= pfn_bitmap1;
3914
* Exclude the page filled with zero in case of creating an elf dumpfile.
3917
exclude_zero_pages(void)
3919
unsigned long long pfn, paddr;
3920
struct dump_bitmap bitmap2;
3921
struct timeval tv_start;
3922
unsigned char buf[info->page_size];
3924
initialize_2nd_bitmap(&bitmap2);
3926
gettimeofday(&tv_start, NULL);
3928
for (pfn = 0, paddr = pfn_to_paddr(pfn); pfn < info->max_mapnr;
3929
pfn++, paddr += info->page_size) {
3931
print_progress(PROGRESS_ZERO_PAGES, pfn, info->max_mapnr);
3933
if (!is_in_segs(paddr))
3936
if (!is_dumpable(&bitmap2, pfn))
3939
if (is_xen_memory()) {
3940
if (!readmem(MADDR_XEN, paddr, buf, info->page_size)) {
3941
ERRMSG("Can't get the page data(pfn:%llx, max_mapnr:%llx).\n",
3942
pfn, info->max_mapnr);
3946
if (!readmem(PADDR, paddr, buf, info->page_size)) {
3947
ERRMSG("Can't get the page data(pfn:%llx, max_mapnr:%llx).\n",
3948
pfn, info->max_mapnr);
3952
if (is_zero_page(buf, info->page_size)) {
3953
if (clear_bit_on_2nd_bitmap(pfn))
3961
print_progress(PROGRESS_ZERO_PAGES, info->max_mapnr, info->max_mapnr);
3962
print_execution_time(PROGRESS_ZERO_PAGES, &tv_start);
3968
__exclude_unnecessary_pages(unsigned long mem_map,
3969
unsigned long long pfn_start, unsigned long long pfn_end)
3971
unsigned long long pfn, pfn_mm, maddr;
3972
unsigned long long pfn_read_start, pfn_read_end, index_pg;
3973
unsigned char page_cache[SIZE(page) * PGMM_CACHED];
3974
unsigned char *pcache;
3975
unsigned int _count, _mapcount = 0;
3976
unsigned long flags, mapping, private = 0;
3979
* Refresh the buffer of struct page, when changing mem_map.
3981
pfn_read_start = ULONGLONG_MAX;
3984
for (pfn = pfn_start; pfn < pfn_end; pfn++, mem_map += SIZE(page)) {
3987
* If this pfn doesn't belong to target region, skip this pfn.
3989
if (info->flag_cyclic && !is_cyclic_region(pfn))
3993
* Exclude the memory hole.
3995
if (is_xen_memory()) {
3996
maddr = ptom_xen(pfn_to_paddr(pfn));
3997
if (maddr == NOT_PADDR) {
3998
ERRMSG("Can't convert a physical address(%llx) to machine address.\n",
4002
if (!is_in_segs(maddr))
4005
if (!is_in_segs(pfn_to_paddr(pfn)))
4009
index_pg = pfn % PGMM_CACHED;
4010
if (pfn < pfn_read_start || pfn_read_end < pfn) {
4011
if (roundup(pfn + 1, PGMM_CACHED) < pfn_end)
4012
pfn_mm = PGMM_CACHED - index_pg;
4014
pfn_mm = pfn_end - pfn;
4016
if (!readmem(VADDR, mem_map,
4017
page_cache + (index_pg * SIZE(page)),
4018
SIZE(page) * pfn_mm)) {
4019
ERRMSG("Can't read the buffer of struct page.\n");
4022
pfn_read_start = pfn;
4023
pfn_read_end = pfn + pfn_mm - 1;
4025
pcache = page_cache + (index_pg * SIZE(page));
4027
flags = ULONG(pcache + OFFSET(page.flags));
4028
_count = UINT(pcache + OFFSET(page._count));
4029
mapping = ULONG(pcache + OFFSET(page.mapping));
4030
if (OFFSET(page._mapcount) != NOT_FOUND_STRUCTURE)
4031
_mapcount = UINT(pcache + OFFSET(page._mapcount));
4032
if (OFFSET(page.private) != NOT_FOUND_STRUCTURE)
4033
private = ULONG(pcache + OFFSET(page.private));
4036
* Exclude the free page managed by a buddy
4038
if ((info->dump_level & DL_EXCLUDE_FREE)
4039
&& info->flag_cyclic
4040
&& info->page_is_buddy
4041
&& info->page_is_buddy(flags, _mapcount, private, _count)) {
4044
for (i = 0; i < (1 << private); ++i) {
4046
* According to combination of
4047
* MAX_ORDER and size of cyclic
4048
* buffer, this clearing bit operation
4049
* can overrun the cyclic buffer.
4051
* See check_cyclic_buffer_overrun()
4054
clear_bit_on_2nd_bitmap_for_kernel(pfn + i);
4059
* Exclude the cache page without the private page.
4061
else if ((info->dump_level & DL_EXCLUDE_CACHE)
4062
&& (isLRU(flags) || isSwapCache(flags))
4063
&& !isPrivate(flags) && !isAnon(mapping)) {
4064
if (clear_bit_on_2nd_bitmap_for_kernel(pfn))
4068
* Exclude the cache page with the private page.
4070
else if ((info->dump_level & DL_EXCLUDE_CACHE_PRI)
4071
&& (isLRU(flags) || isSwapCache(flags))
4072
&& !isAnon(mapping)) {
4073
if (clear_bit_on_2nd_bitmap_for_kernel(pfn))
4074
pfn_cache_private++;
4077
* Exclude the data page of the user process.
4079
else if ((info->dump_level & DL_EXCLUDE_USER_DATA)
4080
&& isAnon(mapping)) {
4081
if (clear_bit_on_2nd_bitmap_for_kernel(pfn))
4089
exclude_unnecessary_pages(void)
4092
struct mem_map_data *mmd;
4093
struct timeval tv_start;
4095
if (is_xen_memory() && !info->dom0_mapnr) {
4096
ERRMSG("Can't get max domain-0 PFN for excluding pages.\n");
4100
gettimeofday(&tv_start, NULL);
4102
for (mm = 0; mm < info->num_mem_map; mm++) {
4103
print_progress(PROGRESS_UNN_PAGES, mm, info->num_mem_map);
4105
mmd = &info->mem_map_data[mm];
4107
if (mmd->mem_map == NOT_MEMMAP_ADDR)
4110
if (!__exclude_unnecessary_pages(mmd->mem_map,
4111
mmd->pfn_start, mmd->pfn_end))
4118
print_progress(PROGRESS_UNN_PAGES, info->num_mem_map, info->num_mem_map);
4119
print_execution_time(PROGRESS_UNN_PAGES, &tv_start);
4125
copy_bitmap_cyclic(void)
4127
memcpy(info->partial_bitmap2, info->partial_bitmap1, info->bufsize_cyclic);
4131
exclude_unnecessary_pages_cyclic(void)
4134
struct mem_map_data *mmd;
4135
struct timeval tv_start;
4138
* Copy 1st-bitmap to 2nd-bitmap.
4140
copy_bitmap_cyclic();
4142
if ((info->dump_level & DL_EXCLUDE_FREE) && !info->page_is_buddy)
4143
if (!exclude_free_page())
4147
* Exclude cache pages, cache private pages, user data pages, and free pages.
4149
if (info->dump_level & DL_EXCLUDE_CACHE ||
4150
info->dump_level & DL_EXCLUDE_CACHE_PRI ||
4151
info->dump_level & DL_EXCLUDE_USER_DATA ||
4152
((info->dump_level & DL_EXCLUDE_FREE) && info->page_is_buddy)) {
4154
gettimeofday(&tv_start, NULL);
4156
for (mm = 0; mm < info->num_mem_map; mm++) {
4158
print_progress(PROGRESS_UNN_PAGES, mm, info->num_mem_map);
4160
mmd = &info->mem_map_data[mm];
4162
if (mmd->mem_map == NOT_MEMMAP_ADDR)
4165
if (mmd->pfn_end >= info->cyclic_start_pfn &&
4166
mmd->pfn_start <= info->cyclic_end_pfn) {
4167
if (!__exclude_unnecessary_pages(mmd->mem_map,
4168
mmd->pfn_start, mmd->pfn_end))
4176
print_progress(PROGRESS_UNN_PAGES, info->num_mem_map, info->num_mem_map);
4177
print_execution_time(PROGRESS_UNN_PAGES, &tv_start);
4184
update_cyclic_region(unsigned long long pfn)
4186
if (is_cyclic_region(pfn))
4189
info->cyclic_start_pfn = round(pfn, info->pfn_cyclic);
4190
info->cyclic_end_pfn = info->cyclic_start_pfn + info->pfn_cyclic;
4192
if (info->cyclic_end_pfn > info->max_mapnr)
4193
info->cyclic_end_pfn = info->max_mapnr;
4195
if (!create_1st_bitmap_cyclic())
4198
if (!exclude_unnecessary_pages_cyclic())
4208
unsigned char buf[info->page_size];
4209
const off_t failed = (off_t)-1;
4212
while (offset < (info->len_bitmap / 2)) {
4213
if (lseek(info->bitmap1->fd, info->bitmap1->offset + offset,
4214
SEEK_SET) == failed) {
4215
ERRMSG("Can't seek the bitmap(%s). %s\n",
4216
info->name_bitmap, strerror(errno));
4219
if (read(info->bitmap1->fd, buf, sizeof(buf)) != sizeof(buf)) {
4220
ERRMSG("Can't read the dump memory(%s). %s\n",
4221
info->name_memory, strerror(errno));
4224
if (lseek(info->bitmap2->fd, info->bitmap2->offset + offset,
4225
SEEK_SET) == failed) {
4226
ERRMSG("Can't seek the bitmap(%s). %s\n",
4227
info->name_bitmap, strerror(errno));
4230
if (write(info->bitmap2->fd, buf, sizeof(buf)) != sizeof(buf)) {
4231
ERRMSG("Can't write the bitmap(%s). %s\n",
4232
info->name_bitmap, strerror(errno));
4235
offset += sizeof(buf);
4242
create_2nd_bitmap(void)
4245
* Copy 1st-bitmap to 2nd-bitmap.
4247
if (!copy_bitmap()) {
4248
ERRMSG("Can't copy 1st-bitmap to 2nd-bitmap.\n");
4253
* Exclude cache pages, cache private pages, user data pages.
4255
if (info->dump_level & DL_EXCLUDE_CACHE ||
4256
info->dump_level & DL_EXCLUDE_CACHE_PRI ||
4257
info->dump_level & DL_EXCLUDE_USER_DATA) {
4258
if (!exclude_unnecessary_pages()) {
4259
ERRMSG("Can't exclude unnecessary pages.\n");
4265
* Exclude free pages.
4267
if (info->dump_level & DL_EXCLUDE_FREE)
4268
if (!exclude_free_page())
4272
* Exclude Xen user domain.
4274
if (info->flag_exclude_xen_dom) {
4275
if (!exclude_xen_user_domain()) {
4276
ERRMSG("Can't exclude xen user domain.\n");
4282
* Exclude pages filled with zero for creating an ELF dumpfile.
4284
* Note: If creating a kdump-compressed dumpfile, makedumpfile
4285
* checks zero-pages while copying dumpable pages to a
4286
* dumpfile from /proc/vmcore. That is valuable for the
4287
* speed, because each page is read one time only.
4288
* Otherwise (if creating an ELF dumpfile), makedumpfile
4289
* should check zero-pages at this time because 2nd-bitmap
4290
* should be fixed for creating an ELF header. That is slow
4291
* due to reading each page two times, but it is necessary.
4293
if ((info->dump_level & DL_EXCLUDE_ZERO) && info->flag_elf_dumpfile) {
4295
* 2nd-bitmap should be flushed at this time, because
4296
* exclude_zero_pages() checks 2nd-bitmap.
4298
if (!sync_2nd_bitmap())
4301
if (!exclude_zero_pages()) {
4302
ERRMSG("Can't exclude pages filled with zero for creating an ELF dumpfile.\n");
4307
if (!sync_2nd_bitmap())
4314
prepare_bitmap_buffer(void)
4319
* Create 2 bitmaps (1st-bitmap & 2nd-bitmap) on block_size boundary.
4320
* The crash utility requires both of them to be aligned to block_size
4323
tmp = divideup(divideup(info->max_mapnr, BITPERBYTE), info->page_size);
4324
info->len_bitmap = tmp*info->page_size*2;
4327
* Prepare bitmap buffers for creating dump bitmap.
4329
if ((info->bitmap1 = malloc(sizeof(struct dump_bitmap))) == NULL) {
4330
ERRMSG("Can't allocate memory for the 1st-bitmap. %s\n",
4334
if ((info->bitmap2 = malloc(sizeof(struct dump_bitmap))) == NULL) {
4335
ERRMSG("Can't allocate memory for the 2nd-bitmap. %s\n",
4339
initialize_1st_bitmap(info->bitmap1);
4340
initialize_2nd_bitmap(info->bitmap2);
4346
prepare_bitmap_buffer_cyclic(void)
4351
* Create 2 bitmaps (1st-bitmap & 2nd-bitmap) on block_size boundary.
4352
* The crash utility requires both of them to be aligned to block_size
4355
tmp = divideup(divideup(info->max_mapnr, BITPERBYTE), info->page_size);
4356
info->len_bitmap = tmp*info->page_size*2;
4359
* Prepare partial bitmap buffers for cyclic processing.
4361
if ((info->partial_bitmap1 = (char *)malloc(info->bufsize_cyclic)) == NULL) {
4362
ERRMSG("Can't allocate memory for the 1st-bitmap. %s\n",
4366
if ((info->partial_bitmap2 = (char *)malloc(info->bufsize_cyclic)) == NULL) {
4367
ERRMSG("Can't allocate memory for the 2nd-bitmap. %s\n",
4371
initialize_bitmap_cyclic(info->partial_bitmap1);
4372
initialize_bitmap_cyclic(info->partial_bitmap2);
4378
free_bitmap_buffer(void)
4380
if (info->bitmap1) {
4381
free(info->bitmap1);
4382
info->bitmap1 = NULL;
4384
if (info->bitmap2) {
4385
free(info->bitmap2);
4386
info->bitmap2 = NULL;
4393
create_dump_bitmap(void)
4397
if (info->flag_cyclic) {
4398
if (!prepare_bitmap_buffer_cyclic())
4401
info->num_dumpable = get_num_dumpable_cyclic();
4403
if (!prepare_bitmap_buffer())
4406
if (!create_1st_bitmap())
4409
if (!create_2nd_bitmap())
4415
free_bitmap_buffer();
4421
get_loads_dumpfile(void)
4423
int i, phnum, num_new_load = 0;
4424
long page_size = info->page_size;
4425
unsigned long long pfn, pfn_start, pfn_end, num_excluded;
4426
unsigned long frac_head, frac_tail;
4428
struct dump_bitmap bitmap2;
4430
initialize_2nd_bitmap(&bitmap2);
4432
if (!(phnum = get_phnum_memory()))
4435
for (i = 0; i < phnum; i++) {
4436
if (!get_phdr_memory(i, &load))
4438
if (load.p_type != PT_LOAD)
4441
pfn_start = paddr_to_pfn(load.p_paddr);
4442
pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
4443
frac_head = page_size - (load.p_paddr % page_size);
4444
frac_tail = (load.p_paddr + load.p_memsz) % page_size;
4449
if (frac_head && (frac_head != page_size))
4454
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
4455
if (!is_dumpable(&bitmap2, pfn)) {
4461
* If the number of the contiguous pages to be excluded
4462
* is 256 or more, those pages are excluded really.
4463
* And a new PT_LOAD segment is created.
4465
if (num_excluded >= PFN_EXCLUDED) {
4471
return num_new_load;
4475
prepare_cache_data(struct cache_data *cd)
4477
cd->fd = info->fd_dumpfile;
4478
cd->file_name = info->name_dumpfile;
4479
cd->cache_size = info->page_size << info->block_order;
4483
if ((cd->buf = malloc(cd->cache_size + info->page_size)) == NULL) {
4484
ERRMSG("Can't allocate memory for the data buffer. %s\n",
4492
free_cache_data(struct cache_data *cd)
4499
write_start_flat_header()
4501
char buf[MAX_SIZE_MDF_HEADER];
4502
struct makedumpfile_header fh;
4504
if (!info->flag_flatten)
4507
strcpy(fh.signature, MAKEDUMPFILE_SIGNATURE);
4510
* For sending dump data to a different architecture, change the values
4513
if (is_bigendian()){
4514
fh.type = TYPE_FLAT_HEADER;
4515
fh.version = VERSION_FLAT_HEADER;
4517
fh.type = bswap_64(TYPE_FLAT_HEADER);
4518
fh.version = bswap_64(VERSION_FLAT_HEADER);
4521
memset(buf, 0, sizeof(buf));
4522
memcpy(buf, &fh, sizeof(fh));
4524
if (!write_and_check_space(info->fd_dumpfile, buf, MAX_SIZE_MDF_HEADER,
4525
info->name_dumpfile))
4532
write_end_flat_header(void)
4534
struct makedumpfile_data_header fdh;
4536
if (!info->flag_flatten)
4539
fdh.offset = END_FLAG_FLAT_HEADER;
4540
fdh.buf_size = END_FLAG_FLAT_HEADER;
4542
if (!write_and_check_space(info->fd_dumpfile, &fdh, sizeof(fdh),
4543
info->name_dumpfile))
4550
write_elf_phdr(struct cache_data *cd_hdr, Elf64_Phdr *load)
4554
if (is_elf64_memory()) { /* ELF64 */
4555
if (!write_cache(cd_hdr, load, sizeof(Elf64_Phdr)))
4559
memset(&load32, 0, sizeof(Elf32_Phdr));
4560
load32.p_type = load->p_type;
4561
load32.p_flags = load->p_flags;
4562
load32.p_offset = load->p_offset;
4563
load32.p_vaddr = load->p_vaddr;
4564
load32.p_paddr = load->p_paddr;
4565
load32.p_filesz = load->p_filesz;
4566
load32.p_memsz = load->p_memsz;
4567
load32.p_align = load->p_align;
4569
if (!write_cache(cd_hdr, &load32, sizeof(Elf32_Phdr)))
4576
write_elf_header(struct cache_data *cd_header)
4578
int i, num_loads_dumpfile, phnum;
4579
off_t offset_note_memory, offset_note_dumpfile;
4580
size_t size_note, size_eraseinfo = 0;
4586
const off_t failed = (off_t)-1;
4590
if (!info->flag_elf_dumpfile)
4594
* Get the PT_LOAD number of the dumpfile.
4596
if (info->flag_cyclic) {
4597
if (!(num_loads_dumpfile = get_loads_dumpfile_cyclic())) {
4598
ERRMSG("Can't get a number of PT_LOAD.\n");
4602
if (!(num_loads_dumpfile = get_loads_dumpfile())) {
4603
ERRMSG("Can't get a number of PT_LOAD.\n");
4608
if (is_elf64_memory()) { /* ELF64 */
4609
if (!get_elf64_ehdr(info->fd_memory,
4610
info->name_memory, &ehdr64)) {
4611
ERRMSG("Can't get ehdr64.\n");
4615
* PT_NOTE(1) + PT_LOAD(1+)
4617
ehdr64.e_phnum = 1 + num_loads_dumpfile;
4618
} else { /* ELF32 */
4619
if (!get_elf32_ehdr(info->fd_memory,
4620
info->name_memory, &ehdr32)) {
4621
ERRMSG("Can't get ehdr32.\n");
4625
* PT_NOTE(1) + PT_LOAD(1+)
4627
ehdr32.e_phnum = 1 + num_loads_dumpfile;
4631
* Write an ELF header.
4633
if (is_elf64_memory()) { /* ELF64 */
4634
if (!write_buffer(info->fd_dumpfile, 0, &ehdr64, sizeof(ehdr64),
4635
info->name_dumpfile))
4638
} else { /* ELF32 */
4639
if (!write_buffer(info->fd_dumpfile, 0, &ehdr32, sizeof(ehdr32),
4640
info->name_dumpfile))
4645
* Pre-calculate the required size to store eraseinfo in ELF note
4646
* section so that we can add enough space in ELF notes section and
4647
* adjust the PT_LOAD offset accordingly.
4649
size_eraseinfo = get_size_eraseinfo();
4652
* Store the size_eraseinfo for later use in write_elf_eraseinfo()
4655
info->size_elf_eraseinfo = size_eraseinfo;
4656
DEBUG_MSG("erase info size: %lu\n", info->size_elf_eraseinfo);
4659
* Write a PT_NOTE header.
4661
if (!(phnum = get_phnum_memory()))
4664
for (i = 0; i < phnum; i++) {
4665
if (!get_phdr_memory(i, ¬e))
4667
if (note.p_type == PT_NOTE)
4670
if (note.p_type != PT_NOTE) {
4671
ERRMSG("Can't get a PT_NOTE header.\n");
4675
if (is_elf64_memory()) { /* ELF64 */
4676
cd_header->offset = sizeof(ehdr64);
4677
offset_note_dumpfile = sizeof(ehdr64)
4678
+ sizeof(Elf64_Phdr) * ehdr64.e_phnum;
4680
cd_header->offset = sizeof(ehdr32);
4681
offset_note_dumpfile = sizeof(ehdr32)
4682
+ sizeof(Elf32_Phdr) * ehdr32.e_phnum;
4684
offset_note_memory = note.p_offset;
4685
note.p_offset = offset_note_dumpfile;
4686
size_note = note.p_filesz;
4689
* Modify the note size in PT_NOTE header to accomodate eraseinfo data.
4690
* Eraseinfo will be written later.
4692
if (info->size_elf_eraseinfo) {
4693
if (is_elf64_memory())
4694
note.p_filesz += sizeof(Elf64_Nhdr);
4696
note.p_filesz += sizeof(Elf32_Nhdr);
4697
note.p_filesz += roundup(ERASEINFO_NOTE_NAME_BYTES, 4) +
4698
roundup(size_eraseinfo, 4);
4701
if (!write_elf_phdr(cd_header, ¬e))
4705
* Write a PT_NOTE segment.
4706
* PT_LOAD header will be written later.
4708
if ((buf = malloc(size_note)) == NULL) {
4709
ERRMSG("Can't allocate memory for PT_NOTE segment. %s\n",
4713
if (lseek(info->fd_memory, offset_note_memory, SEEK_SET) == failed) {
4714
ERRMSG("Can't seek the dump memory(%s). %s\n",
4715
info->name_memory, strerror(errno));
4718
if (read(info->fd_memory, buf, size_note) != size_note) {
4719
ERRMSG("Can't read the dump memory(%s). %s\n",
4720
info->name_memory, strerror(errno));
4723
if (!write_buffer(info->fd_dumpfile, offset_note_dumpfile, buf,
4724
size_note, info->name_dumpfile))
4727
/* Set the size_note with new size. */
4728
size_note = note.p_filesz;
4731
* Set an offset of PT_LOAD segment.
4733
info->offset_load_dumpfile = offset_note_dumpfile + size_note;
4734
info->offset_note_dumpfile = offset_note_dumpfile;
4745
write_kdump_header(void)
4749
off_t offset_note, offset_vmcoreinfo;
4750
unsigned long size_note, size_vmcoreinfo;
4751
struct disk_dump_header *dh = info->dump_header;
4752
struct kdump_sub_header kh;
4755
if (info->flag_elf_dumpfile)
4758
get_pt_note(&offset_note, &size_note);
4761
* Write common header
4763
strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
4764
dh->header_version = 5;
4765
dh->block_size = info->page_size;
4766
dh->sub_hdr_size = sizeof(kh) + size_note;
4767
dh->sub_hdr_size = divideup(dh->sub_hdr_size, dh->block_size);
4768
dh->max_mapnr = info->max_mapnr;
4769
dh->nr_cpus = get_nr_cpus();
4770
dh->bitmap_blocks = divideup(info->len_bitmap, dh->block_size);
4771
memcpy(&dh->timestamp, &info->timestamp, sizeof(dh->timestamp));
4772
memcpy(&dh->utsname, &info->system_utsname, sizeof(dh->utsname));
4774
size = sizeof(struct disk_dump_header);
4775
if (!write_buffer(info->fd_dumpfile, 0, dh, size, info->name_dumpfile))
4781
size = sizeof(struct kdump_sub_header);
4782
memset(&kh, 0, size);
4783
kh.phys_base = info->phys_base;
4784
kh.dump_level = info->dump_level;
4785
if (info->flag_split) {
4787
kh.start_pfn = info->split_start_pfn;
4788
kh.end_pfn = info->split_end_pfn;
4790
if (has_pt_note()) {
4792
* Write ELF note section
4795
= DISKDUMP_HEADER_BLOCKS * dh->block_size + sizeof(kh);
4796
kh.size_note = size_note;
4798
buf = malloc(size_note);
4800
ERRMSG("Can't allocate memory for ELF note section. %s\n",
4805
if (!info->flag_sadump) {
4806
if (lseek(info->fd_memory, offset_note, SEEK_SET) < 0) {
4807
ERRMSG("Can't seek the dump memory(%s). %s\n",
4808
info->name_memory, strerror(errno));
4811
if (read(info->fd_memory, buf, size_note) != size_note) {
4812
ERRMSG("Can't read the dump memory(%s). %s\n",
4813
info->name_memory, strerror(errno));
4817
if (!sadump_read_elf_note(buf, size_note))
4821
if (!write_buffer(info->fd_dumpfile, kh.offset_note, buf,
4822
kh.size_note, info->name_dumpfile))
4825
if (has_vmcoreinfo()) {
4826
get_vmcoreinfo(&offset_vmcoreinfo, &size_vmcoreinfo);
4828
* Set vmcoreinfo data
4830
* NOTE: ELF note section contains vmcoreinfo data, and
4831
* kh.offset_vmcoreinfo points the vmcoreinfo data.
4833
kh.offset_vmcoreinfo
4834
= offset_vmcoreinfo - offset_note
4836
kh.size_vmcoreinfo = size_vmcoreinfo;
4839
if (!write_buffer(info->fd_dumpfile, dh->block_size, &kh,
4840
size, info->name_dumpfile))
4843
info->sub_header = kh;
4844
info->offset_bitmap1
4845
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size) * dh->block_size;
4856
get_num_dumpable(void)
4858
unsigned long long pfn, num_dumpable;
4859
struct dump_bitmap bitmap2;
4861
initialize_2nd_bitmap(&bitmap2);
4863
for (pfn = 0, num_dumpable = 0; pfn < info->max_mapnr; pfn++) {
4864
if (is_dumpable(&bitmap2, pfn))
4867
return num_dumpable;
4871
get_num_dumpable_cyclic(void)
4873
unsigned long long pfn, num_dumpable=0;
4875
for (pfn = 0; pfn < info->max_mapnr; pfn++) {
4876
if (!update_cyclic_region(pfn))
4879
if (is_dumpable_cyclic(info->partial_bitmap2, pfn))
4883
return num_dumpable;
4887
write_elf_load_segment(struct cache_data *cd_page, unsigned long long paddr,
4888
off_t off_memory, long long size)
4890
long page_size = info->page_size;
4891
long long bufsz_write;
4892
char buf[info->page_size];
4894
off_memory = paddr_to_offset2(paddr, off_memory);
4896
ERRMSG("Can't convert physaddr(%llx) to an offset.\n",
4900
if (lseek(info->fd_memory, off_memory, SEEK_SET) < 0) {
4901
ERRMSG("Can't seek the dump memory(%s). %s\n",
4902
info->name_memory, strerror(errno));
4907
if (size >= page_size)
4908
bufsz_write = page_size;
4912
if (read(info->fd_memory, buf, bufsz_write) != bufsz_write) {
4913
ERRMSG("Can't read the dump memory(%s). %s\n",
4914
info->name_memory, strerror(errno));
4917
filter_data_buffer((unsigned char *)buf, paddr, bufsz_write);
4918
paddr += bufsz_write;
4919
if (!write_cache(cd_page, buf, bufsz_write))
4928
write_elf_pages(struct cache_data *cd_header, struct cache_data *cd_page)
4931
long page_size = info->page_size;
4932
unsigned long long pfn, pfn_start, pfn_end, paddr, num_excluded;
4933
unsigned long long num_dumpable, per;
4934
unsigned long long memsz, filesz;
4935
unsigned long frac_head, frac_tail;
4936
off_t off_seg_load, off_memory;
4938
struct dump_bitmap bitmap2;
4939
struct timeval tv_start;
4941
if (!info->flag_elf_dumpfile)
4944
initialize_2nd_bitmap(&bitmap2);
4946
num_dumpable = get_num_dumpable();
4947
per = num_dumpable / 100;
4949
off_seg_load = info->offset_load_dumpfile;
4950
cd_page->offset = info->offset_load_dumpfile;
4952
if (!(phnum = get_phnum_memory()))
4955
gettimeofday(&tv_start, NULL);
4957
for (i = 0; i < phnum; i++) {
4958
if (!get_phdr_memory(i, &load))
4961
if (load.p_type != PT_LOAD)
4964
off_memory= load.p_offset;
4965
paddr = load.p_paddr;
4966
pfn_start = paddr_to_pfn(load.p_paddr);
4967
pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
4968
frac_head = page_size - (load.p_paddr % page_size);
4969
frac_tail = (load.p_paddr + load.p_memsz)%page_size;
4974
if (frac_head && (frac_head != page_size)) {
4983
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
4984
if (!is_dumpable(&bitmap2, pfn)) {
4986
if ((pfn == pfn_end - 1) && frac_tail)
4993
if ((num_dumped % per) == 0)
4994
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
4999
* The dumpable pages are continuous.
5001
if (!num_excluded) {
5002
if ((pfn == pfn_end - 1) && frac_tail) {
5004
filesz += frac_tail;
5007
filesz += page_size;
5011
* If the number of the contiguous pages to be excluded
5012
* is 255 or less, those pages are not excluded.
5014
} else if (num_excluded < PFN_EXCLUDED) {
5015
if ((pfn == pfn_end - 1) && frac_tail) {
5017
filesz += (page_size*num_excluded
5021
filesz += (page_size*num_excluded
5029
* If the number of the contiguous pages to be excluded
5030
* is 256 or more, those pages are excluded really.
5031
* And a new PT_LOAD segment is created.
5033
load.p_memsz = memsz;
5034
load.p_filesz = filesz;
5036
load.p_offset = off_seg_load;
5039
* If PT_LOAD segment does not have real data
5040
* due to the all excluded pages, the file
5041
* offset is not effective and it should be 0.
5046
* Write a PT_LOAD header.
5048
if (!write_elf_phdr(cd_header, &load))
5052
* Write a PT_LOAD segment.
5055
if (!write_elf_load_segment(cd_page, paddr,
5056
off_memory, load.p_filesz))
5059
load.p_paddr += load.p_memsz;
5063
* (x86) Fill PT_LOAD headers with appropriate
5064
* virtual addresses.
5066
if (load.p_paddr < MAXMEM)
5067
load.p_vaddr += load.p_memsz;
5069
load.p_vaddr += load.p_memsz;
5071
paddr = load.p_paddr;
5072
off_seg_load += load.p_filesz;
5079
* Write the last PT_LOAD.
5081
load.p_memsz = memsz;
5082
load.p_filesz = filesz;
5083
load.p_offset = off_seg_load;
5086
* Write a PT_LOAD header.
5088
if (!write_elf_phdr(cd_header, &load))
5092
* Write a PT_LOAD segment.
5095
if (!write_elf_load_segment(cd_page, paddr,
5096
off_memory, load.p_filesz))
5099
off_seg_load += load.p_filesz;
5101
if (!write_cache_bufsz(cd_header))
5103
if (!write_cache_bufsz(cd_page))
5109
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
5110
print_execution_time(PROGRESS_COPY, &tv_start);
5117
* This function is specific for reading page.
5119
* If reading the separated page on different PT_LOAD segments,
5120
* this function gets the page data from both segments. This is
5121
* worthy of ia64 /proc/vmcore. In ia64 /proc/vmcore, region 5
5122
* segment is overlapping to region 7 segment. The following is
5123
* example (page_size is 16KBytes):
5125
* region | paddr | memsz
5126
* --------+--------------------+--------------------
5127
* 5 | 0x0000000004000000 | 0x0000000000638ce0
5128
* 7 | 0x0000000004000000 | 0x0000000000db3000
5130
* In the above example, the last page of region 5 is 0x4638000
5131
* and the segment does not contain complete data of this page.
5132
* Then this function gets the data of 0x4638000 - 0x4638ce0
5133
* from region 5, and gets the remaining data from region 7.
5136
read_pfn(unsigned long long pfn, unsigned char *buf)
5138
unsigned long long paddr;
5139
off_t offset1, offset2;
5140
size_t size1, size2;
5142
paddr = pfn_to_paddr(pfn);
5143
if (info->flag_refiltering || info->flag_sadump) {
5144
if (!readmem(PADDR, paddr, buf, info->page_size)) {
5145
ERRMSG("Can't get the page data.\n");
5148
filter_data_buffer(buf, paddr, info->page_size);
5152
offset1 = paddr_to_offset(paddr);
5153
offset2 = paddr_to_offset(paddr + info->page_size);
5156
* Check the separated page on different PT_LOAD segments.
5158
if (offset1 + info->page_size == offset2) {
5159
size1 = info->page_size;
5161
for (size1 = 1; size1 < info->page_size; size1++) {
5162
offset2 = paddr_to_offset(paddr + size1);
5163
if (offset1 + size1 != offset2)
5167
if (!readmem(PADDR, paddr, buf, size1)) {
5168
ERRMSG("Can't get the page data.\n");
5171
filter_data_buffer(buf, paddr, size1);
5172
if (size1 != info->page_size) {
5173
size2 = info->page_size - size1;
5175
memset(buf + size1, 0, size2);
5177
if (!readmem(PADDR, paddr + size1, buf + size1, size2)) {
5178
ERRMSG("Can't get the page data.\n");
5181
filter_data_buffer(buf + size1, paddr + size1, size2);
5188
get_loads_dumpfile_cyclic(void)
5190
int i, phnum, num_new_load = 0;
5191
long page_size = info->page_size;
5192
unsigned char buf[info->page_size];
5193
unsigned long long pfn, pfn_start, pfn_end, num_excluded;
5194
unsigned long frac_head, frac_tail;
5198
* Initialize target region and bitmap.
5200
info->cyclic_start_pfn = 0;
5201
info->cyclic_end_pfn = info->pfn_cyclic;
5202
if (!create_1st_bitmap_cyclic())
5204
if (!exclude_unnecessary_pages_cyclic())
5207
if (!(phnum = get_phnum_memory()))
5210
for (i = 0; i < phnum; i++) {
5211
if (!get_phdr_memory(i, &load))
5213
if (load.p_type != PT_LOAD)
5216
pfn_start = paddr_to_pfn(load.p_paddr);
5217
pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
5218
frac_head = page_size - (load.p_paddr % page_size);
5219
frac_tail = (load.p_paddr + load.p_memsz) % page_size;
5224
if (frac_head && (frac_head != page_size))
5229
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
5231
* Update target region and bitmap
5233
if (!is_cyclic_region(pfn)) {
5234
if (!update_cyclic_region(pfn))
5238
if (!is_dumpable_cyclic(info->partial_bitmap2, pfn)) {
5244
* Exclude zero pages.
5246
if (info->dump_level & DL_EXCLUDE_ZERO) {
5247
if (!read_pfn(pfn, buf))
5249
if (is_zero_page(buf, page_size)) {
5255
info->num_dumpable++;
5258
* If the number of the contiguous pages to be excluded
5259
* is 256 or more, those pages are excluded really.
5260
* And a new PT_LOAD segment is created.
5262
if (num_excluded >= PFN_EXCLUDED) {
5268
return num_new_load;
5272
write_elf_pages_cyclic(struct cache_data *cd_header, struct cache_data *cd_page)
5275
long page_size = info->page_size;
5276
unsigned char buf[info->page_size];
5277
unsigned long long pfn, pfn_start, pfn_end, paddr, num_excluded;
5278
unsigned long long num_dumpable, per;
5279
unsigned long long memsz, filesz;
5280
unsigned long frac_head, frac_tail;
5281
off_t off_seg_load, off_memory;
5283
struct timeval tv_start;
5285
if (!info->flag_elf_dumpfile)
5288
num_dumpable = info->num_dumpable;
5289
per = num_dumpable / 100;
5291
off_seg_load = info->offset_load_dumpfile;
5292
cd_page->offset = info->offset_load_dumpfile;
5295
* Reset counter for debug message.
5297
pfn_zero = pfn_cache = pfn_cache_private = pfn_user = pfn_free = 0;
5298
pfn_memhole = info->max_mapnr;
5300
info->cyclic_start_pfn = 0;
5301
info->cyclic_end_pfn = 0;
5302
if (!update_cyclic_region(0))
5305
if (!(phnum = get_phnum_memory()))
5308
gettimeofday(&tv_start, NULL);
5310
for (i = 0; i < phnum; i++) {
5311
if (!get_phdr_memory(i, &load))
5314
if (load.p_type != PT_LOAD)
5317
off_memory= load.p_offset;
5318
paddr = load.p_paddr;
5319
pfn_start = paddr_to_pfn(load.p_paddr);
5320
pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
5321
frac_head = page_size - (load.p_paddr % page_size);
5322
frac_tail = (load.p_paddr + load.p_memsz)%page_size;
5327
if (frac_head && (frac_head != page_size)) {
5336
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
5338
* Update target region and partial bitmap if necessary.
5340
if (!update_cyclic_region(pfn))
5343
if (!is_dumpable_cyclic(info->partial_bitmap2, pfn)) {
5345
if ((pfn == pfn_end - 1) && frac_tail)
5353
* Exclude zero pages.
5355
if (info->dump_level & DL_EXCLUDE_ZERO) {
5356
if (!read_pfn(pfn, buf))
5358
if (is_zero_page(buf, page_size)) {
5361
if ((pfn == pfn_end - 1) && frac_tail)
5369
if ((num_dumped % per) == 0)
5370
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
5375
* The dumpable pages are continuous.
5377
if (!num_excluded) {
5378
if ((pfn == pfn_end - 1) && frac_tail) {
5380
filesz += frac_tail;
5383
filesz += page_size;
5387
* If the number of the contiguous pages to be excluded
5388
* is 255 or less, those pages are not excluded.
5390
} else if (num_excluded < PFN_EXCLUDED) {
5391
if ((pfn == pfn_end - 1) && frac_tail) {
5393
filesz += (page_size*num_excluded
5397
filesz += (page_size*num_excluded
5405
* If the number of the contiguous pages to be excluded
5406
* is 256 or more, those pages are excluded really.
5407
* And a new PT_LOAD segment is created.
5409
load.p_memsz = memsz;
5410
load.p_filesz = filesz;
5412
load.p_offset = off_seg_load;
5415
* If PT_LOAD segment does not have real data
5416
* due to the all excluded pages, the file
5417
* offset is not effective and it should be 0.
5422
* Write a PT_LOAD header.
5424
if (!write_elf_phdr(cd_header, &load))
5428
* Write a PT_LOAD segment.
5431
if (!write_elf_load_segment(cd_page, paddr,
5432
off_memory, load.p_filesz))
5435
load.p_paddr += load.p_memsz;
5439
* (x86) Fill PT_LOAD headers with appropriate
5440
* virtual addresses.
5442
if (load.p_paddr < MAXMEM)
5443
load.p_vaddr += load.p_memsz;
5445
load.p_vaddr += load.p_memsz;
5447
paddr = load.p_paddr;
5448
off_seg_load += load.p_filesz;
5455
* Write the last PT_LOAD.
5457
load.p_memsz = memsz;
5458
load.p_filesz = filesz;
5459
load.p_offset = off_seg_load;
5462
* Write a PT_LOAD header.
5464
if (!write_elf_phdr(cd_header, &load))
5468
* Write a PT_LOAD segment.
5471
if (!write_elf_load_segment(cd_page, paddr,
5472
off_memory, load.p_filesz))
5475
off_seg_load += load.p_filesz;
5477
if (!write_cache_bufsz(cd_header))
5479
if (!write_cache_bufsz(cd_page))
5485
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
5486
print_execution_time(PROGRESS_COPY, &tv_start);
5493
write_kdump_pages(struct cache_data *cd_header, struct cache_data *cd_page)
5495
unsigned long long pfn, per, num_dumpable;
5496
unsigned long long start_pfn, end_pfn;
5497
unsigned long size_out;
5498
struct page_desc pd, pd_zero;
5499
off_t offset_data = 0;
5500
struct disk_dump_header *dh = info->dump_header;
5501
unsigned char buf[info->page_size], *buf_out = NULL;
5502
unsigned long len_buf_out;
5503
struct dump_bitmap bitmap2;
5504
struct timeval tv_start;
5505
const off_t failed = (off_t)-1;
5506
unsigned long len_buf_out_zlib, len_buf_out_lzo, len_buf_out_snappy;
5510
if (info->flag_elf_dumpfile)
5513
initialize_2nd_bitmap(&bitmap2);
5515
len_buf_out_zlib = len_buf_out_lzo = len_buf_out_snappy = 0;
5520
if ((wrkmem = malloc(LZO1X_1_MEM_COMPRESS)) == NULL) {
5521
ERRMSG("Can't allocate memory for the working memory. %s\n",
5526
len_buf_out_lzo = info->page_size + info->page_size / 16 + 64 + 3;
5530
len_buf_out_snappy = snappy_max_compressed_length(info->page_size);
5533
len_buf_out_zlib = compressBound(info->page_size);
5535
len_buf_out = MAX(len_buf_out_zlib,
5536
MAX(len_buf_out_lzo,
5537
len_buf_out_snappy));
5539
if ((buf_out = malloc(len_buf_out)) == NULL) {
5540
ERRMSG("Can't allocate memory for the compression buffer. %s\n",
5545
num_dumpable = get_num_dumpable();
5546
per = num_dumpable / 100;
5549
* Calculate the offset of the page data.
5552
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size + dh->bitmap_blocks)
5554
cd_page->offset = cd_header->offset + sizeof(page_desc_t)*num_dumpable;
5555
offset_data = cd_page->offset;
5558
* Set a fileoffset of Physical Address 0x0.
5560
if (lseek(info->fd_memory, get_offset_pt_load_memory(), SEEK_SET)
5562
ERRMSG("Can't seek the dump memory(%s). %s\n",
5563
info->name_memory, strerror(errno));
5568
* Write the data of zero-filled page.
5570
gettimeofday(&tv_start, NULL);
5571
if (info->dump_level & DL_EXCLUDE_ZERO) {
5572
pd_zero.size = info->page_size;
5574
pd_zero.offset = offset_data;
5575
pd_zero.page_flags = 0;
5576
memset(buf, 0, pd_zero.size);
5577
if (!write_cache(cd_page, buf, pd_zero.size))
5579
offset_data += pd_zero.size;
5581
if (info->flag_split) {
5582
start_pfn = info->split_start_pfn;
5583
end_pfn = info->split_end_pfn;
5587
end_pfn = info->max_mapnr;
5590
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5592
if ((num_dumped % per) == 0)
5593
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
5596
* Check the excluded page.
5598
if (!is_dumpable(&bitmap2, pfn))
5603
if (!read_pfn(pfn, buf))
5607
* Exclude the page filled with zeros.
5609
if ((info->dump_level & DL_EXCLUDE_ZERO)
5610
&& is_zero_page(buf, info->page_size)) {
5611
if (!write_cache(cd_header, &pd_zero, sizeof(page_desc_t)))
5617
* Compress the page data.
5619
size_out = len_buf_out;
5620
if ((info->flag_compress & DUMP_DH_COMPRESSED_ZLIB)
5621
&& ((size_out = len_buf_out),
5622
compress2(buf_out, &size_out, buf, info->page_size,
5623
Z_BEST_SPEED) == Z_OK)
5624
&& (size_out < info->page_size)) {
5625
pd.flags = DUMP_DH_COMPRESSED_ZLIB;
5627
memcpy(buf, buf_out, pd.size);
5629
} else if (info->flag_lzo_support
5630
&& (info->flag_compress & DUMP_DH_COMPRESSED_LZO)
5631
&& ((size_out = info->page_size),
5632
lzo1x_1_compress(buf, info->page_size, buf_out,
5633
&size_out, wrkmem) == LZO_E_OK)
5634
&& (size_out < info->page_size)) {
5635
pd.flags = DUMP_DH_COMPRESSED_LZO;
5637
memcpy(buf, buf_out, pd.size);
5640
} else if ((info->flag_compress & DUMP_DH_COMPRESSED_SNAPPY)
5641
&& ((size_out = len_buf_out_snappy),
5642
snappy_compress((char *)buf, info->page_size,
5644
(size_t *)&size_out)
5646
&& (size_out < info->page_size)) {
5647
pd.flags = DUMP_DH_COMPRESSED_SNAPPY;
5649
memcpy(buf, buf_out, pd.size);
5653
pd.size = info->page_size;
5656
pd.offset = offset_data;
5657
offset_data += pd.size;
5660
* Write the page header.
5662
if (!write_cache(cd_header, &pd, sizeof(page_desc_t)))
5666
* Write the page data.
5668
if (!write_cache(cd_page, buf, pd.size))
5673
* Write the remainder.
5675
if (!write_cache_bufsz(cd_page))
5677
if (!write_cache_bufsz(cd_header))
5683
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
5684
print_execution_time(PROGRESS_COPY, &tv_start);
5689
if (buf_out != NULL)
5700
write_kdump_pages_cyclic(struct cache_data *cd_header, struct cache_data *cd_page,
5701
struct page_desc *pd_zero, off_t *offset_data)
5703
unsigned long long pfn, per;
5704
unsigned long long start_pfn, end_pfn;
5705
unsigned long size_out;
5706
struct page_desc pd;
5707
unsigned char buf[info->page_size], *buf_out = NULL;
5708
unsigned long len_buf_out;
5709
const off_t failed = (off_t)-1;
5710
unsigned long len_buf_out_zlib, len_buf_out_lzo, len_buf_out_snappy;
5714
if (info->flag_elf_dumpfile)
5717
len_buf_out_zlib = len_buf_out_lzo = len_buf_out_snappy = 0;
5722
if ((wrkmem = malloc(LZO1X_1_MEM_COMPRESS)) == NULL) {
5723
ERRMSG("Can't allocate memory for the working memory. %s\n",
5728
len_buf_out_lzo = info->page_size + info->page_size / 16 + 64 + 3;
5731
len_buf_out_snappy = snappy_max_compressed_length(info->page_size);
5734
len_buf_out_zlib = compressBound(info->page_size);
5736
len_buf_out = MAX(len_buf_out_zlib,
5737
MAX(len_buf_out_lzo,
5738
len_buf_out_snappy));
5740
if ((buf_out = malloc(len_buf_out)) == NULL) {
5741
ERRMSG("Can't allocate memory for the compression buffer. %s\n",
5746
per = info->num_dumpable / 100;
5749
* Set a fileoffset of Physical Address 0x0.
5751
if (lseek(info->fd_memory, get_offset_pt_load_memory(), SEEK_SET)
5753
ERRMSG("Can't seek the dump memory(%s). %s\n",
5754
info->name_memory, strerror(errno));
5758
start_pfn = info->cyclic_start_pfn;
5759
end_pfn = info->cyclic_end_pfn;
5761
if (info->flag_split) {
5762
if (start_pfn < info->split_start_pfn)
5763
start_pfn = info->split_start_pfn;
5764
if (end_pfn > info->split_end_pfn)
5765
end_pfn = info->split_end_pfn;
5768
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5770
if ((num_dumped % per) == 0)
5771
print_progress(PROGRESS_COPY, num_dumped, info->num_dumpable);
5774
* Check the excluded page.
5776
if (!is_dumpable_cyclic(info->partial_bitmap2, pfn))
5781
if (!read_pfn(pfn, buf))
5785
* Exclude the page filled with zeros.
5787
if ((info->dump_level & DL_EXCLUDE_ZERO)
5788
&& is_zero_page(buf, info->page_size)) {
5789
if (!write_cache(cd_header, pd_zero, sizeof(page_desc_t)))
5795
* Compress the page data.
5797
size_out = len_buf_out;
5798
if ((info->flag_compress & DUMP_DH_COMPRESSED_ZLIB)
5799
&& ((size_out = len_buf_out),
5800
compress2(buf_out, &size_out, buf, info->page_size,
5801
Z_BEST_SPEED) == Z_OK)
5802
&& (size_out < info->page_size)) {
5803
pd.flags = DUMP_DH_COMPRESSED_ZLIB;
5805
memcpy(buf, buf_out, pd.size);
5807
} else if (info->flag_lzo_support
5808
&& (info->flag_compress & DUMP_DH_COMPRESSED_LZO)
5809
&& ((size_out = info->page_size),
5810
lzo1x_1_compress(buf, info->page_size, buf_out,
5811
&size_out, wrkmem) == LZO_E_OK)
5812
&& (size_out < info->page_size)) {
5813
pd.flags = DUMP_DH_COMPRESSED_LZO;
5815
memcpy(buf, buf_out, pd.size);
5818
} else if ((info->flag_compress & DUMP_DH_COMPRESSED_SNAPPY)
5819
&& ((size_out = len_buf_out_snappy),
5820
snappy_compress((char *)buf, info->page_size,
5822
(size_t *)&size_out)
5824
&& (size_out < info->page_size)) {
5825
pd.flags = DUMP_DH_COMPRESSED_SNAPPY;
5827
memcpy(buf, buf_out, pd.size);
5831
pd.size = info->page_size;
5834
pd.offset = *offset_data;
5835
*offset_data += pd.size;
5838
* Write the page header.
5840
if (!write_cache(cd_header, &pd, sizeof(page_desc_t)))
5844
* Write the page data.
5846
if (!write_cache(cd_page, buf, pd.size))
5852
if (buf_out != NULL)
5863
* Copy eraseinfo from input dumpfile/vmcore to output dumpfile.
5866
copy_eraseinfo(struct cache_data *cd_eraseinfo)
5873
get_eraseinfo(&offset, &size);
5876
ERRMSG("Can't allocate memory for erase info section. %s\n",
5880
if (lseek(info->fd_memory, offset, SEEK_SET) < 0) {
5881
ERRMSG("Can't seek the dump memory(%s). %s\n",
5882
info->name_memory, strerror(errno));
5885
if (read(info->fd_memory, buf, size) != size) {
5886
ERRMSG("Can't read the dump memory(%s). %s\n",
5887
info->name_memory, strerror(errno));
5890
if (!write_cache(cd_eraseinfo, buf, size))
5900
update_eraseinfo_of_sub_header(off_t offset_eraseinfo,
5901
unsigned long size_eraseinfo)
5905
/* seek to kdump sub header offset */
5906
offset = DISKDUMP_HEADER_BLOCKS * info->page_size;
5908
info->sub_header.offset_eraseinfo = offset_eraseinfo;
5909
info->sub_header.size_eraseinfo = size_eraseinfo;
5911
if (!write_buffer(info->fd_dumpfile, offset, &info->sub_header,
5912
sizeof(struct kdump_sub_header), info->name_dumpfile))
5919
* Traverse through eraseinfo nodes and write it to the o/p dumpfile if the
5920
* node has erased flag set.
5923
write_eraseinfo(struct cache_data *cd_page, unsigned long *size_out)
5925
int i, j, obuf_size = 0, ei_size = 0;
5927
unsigned long size_eraseinfo = 0;
5929
char size_str[MAX_SIZE_STR_LEN];
5931
for (i = 1; i < num_erase_info; i++) {
5932
if (!erase_info[i].erased)
5934
for (j = 0; j < erase_info[i].num_sizes; j++) {
5935
if (erase_info[i].sizes[j] > 0)
5936
sprintf(size_str, "size %ld\n",
5937
erase_info[i].sizes[j]);
5938
else if (erase_info[i].sizes[j] == -1)
5939
sprintf(size_str, "nullify\n");
5941
/* Calculate the required buffer size. */
5942
ei_size = strlen("erase ") +
5943
strlen(erase_info[i].symbol_expr) + 1 +
5947
* If obuf is allocated in the previous run and is
5948
* big enough to hold current erase info string then
5949
* reuse it otherwise realloc.
5951
if (ei_size > obuf_size) {
5952
obuf_size = ei_size;
5953
obuf = realloc(obuf, obuf_size);
5955
ERRMSG("Can't allocate memory for"
5956
" output buffer\n");
5960
sprintf(obuf, "erase %s %s", erase_info[i].symbol_expr,
5963
if (!write_cache(cd_page, obuf, strlen(obuf)))
5965
size_eraseinfo += strlen(obuf);
5969
* Write the remainder.
5971
if (!write_cache_bufsz(cd_page))
5974
*size_out = size_eraseinfo;
5984
write_elf_eraseinfo(struct cache_data *cd_header)
5986
char note[MAX_SIZE_NHDR];
5987
char buf[ERASEINFO_NOTE_NAME_BYTES + 4];
5988
off_t offset_eraseinfo;
5989
unsigned long note_header_size, size_written, size_note;
5991
if (!info->size_elf_eraseinfo)
5994
DEBUG_MSG("Writing erase info...\n");
5996
/* calculate the eraseinfo ELF note offset */
5997
get_pt_note(NULL, &size_note);
5998
cd_header->offset = info->offset_note_dumpfile +
5999
roundup(size_note, 4);
6001
/* Write eraseinfo ELF note header. */
6002
memset(note, 0, sizeof(note));
6003
if (is_elf64_memory()) {
6004
Elf64_Nhdr *nh = (Elf64_Nhdr *)note;
6006
note_header_size = sizeof(Elf64_Nhdr);
6007
nh->n_namesz = ERASEINFO_NOTE_NAME_BYTES;
6008
nh->n_descsz = info->size_elf_eraseinfo;
6011
Elf32_Nhdr *nh = (Elf32_Nhdr *)note;
6013
note_header_size = sizeof(Elf32_Nhdr);
6014
nh->n_namesz = ERASEINFO_NOTE_NAME_BYTES;
6015
nh->n_descsz = info->size_elf_eraseinfo;
6018
if (!write_cache(cd_header, note, note_header_size))
6021
/* Write eraseinfo Note name */
6022
memset(buf, 0, sizeof(buf));
6023
memcpy(buf, ERASEINFO_NOTE_NAME, ERASEINFO_NOTE_NAME_BYTES);
6024
if (!write_cache(cd_header, buf,
6025
roundup(ERASEINFO_NOTE_NAME_BYTES, 4)))
6028
offset_eraseinfo = cd_header->offset;
6029
if (!write_eraseinfo(cd_header, &size_written))
6033
* The actual eraseinfo written may be less than pre-calculated size.
6034
* Hence fill up the rest of size with zero's.
6036
if (size_written < info->size_elf_eraseinfo)
6037
write_cache_zero(cd_header,
6038
info->size_elf_eraseinfo - size_written);
6040
DEBUG_MSG("offset_eraseinfo: %llx, size_eraseinfo: %ld\n",
6041
(unsigned long long)offset_eraseinfo, info->size_elf_eraseinfo);
6047
write_kdump_eraseinfo(struct cache_data *cd_page)
6049
off_t offset_eraseinfo;
6050
unsigned long size_eraseinfo, size_written;
6052
DEBUG_MSG("Writing erase info...\n");
6053
offset_eraseinfo = cd_page->offset;
6056
* In case of refiltering copy the existing eraseinfo from input
6057
* dumpfile to o/p dumpfile.
6059
if (has_eraseinfo()) {
6060
get_eraseinfo(NULL, &size_eraseinfo);
6061
if (!copy_eraseinfo(cd_page))
6066
if (!write_eraseinfo(cd_page, &size_written))
6069
size_eraseinfo += size_written;
6070
DEBUG_MSG("offset_eraseinfo: %llx, size_eraseinfo: %ld\n",
6071
(unsigned long long)offset_eraseinfo, size_eraseinfo);
6074
/* Update the erase info offset and size in kdump sub header */
6075
if (!update_eraseinfo_of_sub_header(offset_eraseinfo,
6083
write_kdump_bitmap(void)
6085
struct cache_data bm;
6091
if (info->flag_elf_dumpfile)
6094
bm.fd = info->fd_bitmap;
6095
bm.file_name = info->name_bitmap;
6099
if ((bm.buf = calloc(1, BUFSIZE_BITMAP)) == NULL) {
6100
ERRMSG("Can't allocate memory for dump bitmap buffer. %s\n",
6104
offset = info->offset_bitmap1;
6105
buf_size = info->len_bitmap;
6107
while (buf_size > 0) {
6108
if (buf_size >= BUFSIZE_BITMAP)
6109
bm.cache_size = BUFSIZE_BITMAP;
6111
bm.cache_size = buf_size;
6113
if(!read_cache(&bm))
6116
if (!write_buffer(info->fd_dumpfile, offset,
6117
bm.buf, bm.cache_size, info->name_dumpfile))
6120
offset += bm.cache_size;
6121
buf_size -= BUFSIZE_BITMAP;
6132
write_kdump_bitmap_cyclic(void)
6138
increment = divideup(info->cyclic_end_pfn - info->cyclic_start_pfn, BITPERBYTE);
6140
if (info->flag_elf_dumpfile)
6143
offset = info->offset_bitmap1;
6144
if (!write_buffer(info->fd_dumpfile, offset,
6145
info->partial_bitmap1, increment, info->name_dumpfile))
6148
offset += info->len_bitmap / 2;
6149
if (!write_buffer(info->fd_dumpfile, offset,
6150
info->partial_bitmap2, increment, info->name_dumpfile))
6153
info->offset_bitmap1 += increment;
6162
write_kdump_pages_and_bitmap_cyclic(struct cache_data *cd_header, struct cache_data *cd_page)
6164
struct page_desc pd_zero;
6165
off_t offset_data=0;
6166
struct disk_dump_header *dh = info->dump_header;
6167
unsigned char buf[info->page_size];
6168
unsigned long long pfn;
6169
struct timeval tv_start;
6171
gettimeofday(&tv_start, NULL);
6174
* Reset counter for debug message.
6176
pfn_zero = pfn_cache = pfn_cache_private = pfn_user = pfn_free = 0;
6177
pfn_memhole = info->max_mapnr;
6180
= (DISKDUMP_HEADER_BLOCKS + dh->sub_hdr_size + dh->bitmap_blocks)
6182
cd_page->offset = cd_header->offset + sizeof(page_desc_t)*info->num_dumpable;
6183
offset_data = cd_page->offset;
6186
* Write the data of zero-filled page.
6188
if (info->dump_level & DL_EXCLUDE_ZERO) {
6189
pd_zero.size = info->page_size;
6191
pd_zero.offset = offset_data;
6192
pd_zero.page_flags = 0;
6193
memset(buf, 0, pd_zero.size);
6194
if (!write_cache(cd_page, buf, pd_zero.size))
6196
offset_data += pd_zero.size;
6200
* Write pages and bitmap cyclically.
6202
info->cyclic_start_pfn = 0;
6203
info->cyclic_end_pfn = 0;
6204
for (pfn = 0; pfn < info->max_mapnr; pfn++) {
6205
if (is_cyclic_region(pfn))
6208
if (!update_cyclic_region(pfn))
6211
if (!write_kdump_pages_cyclic(cd_header, cd_page, &pd_zero, &offset_data))
6214
if (!write_kdump_bitmap_cyclic())
6219
* Write the remainder.
6221
if (!write_cache_bufsz(cd_page))
6223
if (!write_cache_bufsz(cd_header))
6229
print_progress(PROGRESS_COPY, num_dumped, info->num_dumpable);
6230
print_execution_time(PROGRESS_COPY, &tv_start);
6237
close_vmcoreinfo(void)
6239
if(fclose(info->file_vmcoreinfo) < 0)
6240
ERRMSG("Can't close the vmcoreinfo file(%s). %s\n",
6241
info->name_vmcoreinfo, strerror(errno));
6245
close_dump_memory(void)
6247
if ((info->fd_memory = close(info->fd_memory)) < 0)
6248
ERRMSG("Can't close the dump memory(%s). %s\n",
6249
info->name_memory, strerror(errno));
6253
close_dump_file(void)
6255
if (info->flag_flatten)
6258
if ((info->fd_dumpfile = close(info->fd_dumpfile)) < 0)
6259
ERRMSG("Can't close the dump file(%s). %s\n",
6260
info->name_dumpfile, strerror(errno));
6264
close_dump_bitmap(void)
6266
if ((info->fd_bitmap = close(info->fd_bitmap)) < 0)
6267
ERRMSG("Can't close the bitmap file(%s). %s\n",
6268
info->name_bitmap, strerror(errno));
6269
free(info->name_bitmap);
6270
info->name_bitmap = NULL;
6274
close_kernel_file(void)
6276
if (info->name_vmlinux) {
6277
if ((info->fd_vmlinux = close(info->fd_vmlinux)) < 0) {
6278
ERRMSG("Can't close the kernel file(%s). %s\n",
6279
info->name_vmlinux, strerror(errno));
6282
if (info->name_xen_syms) {
6283
if ((info->fd_xen_syms = close(info->fd_xen_syms)) < 0) {
6284
ERRMSG("Can't close the kernel file(%s). %s\n",
6285
info->name_xen_syms, strerror(errno));
6291
* Close the following files when it generates the vmcoreinfo file.
6296
close_files_for_generating_vmcoreinfo(void)
6298
close_kernel_file();
6306
* Close the following file when it rearranges the dump data.
6310
close_files_for_rearranging_dumpdata(void)
6318
* Close the following files when it creates the dump file.
6322
* if it reads the vmcoreinfo file
6328
close_files_for_creating_dumpfile(void)
6330
if (info->max_dump_level > DL_EXCLUDE_ZERO)
6331
close_kernel_file();
6333
/* free name for vmcoreinfo */
6334
if (has_vmcoreinfo()) {
6335
free(info->name_vmcoreinfo);
6336
info->name_vmcoreinfo = NULL;
6338
close_dump_memory();
6340
close_dump_bitmap();
6346
* for Xen extraction
6349
get_symbol_info_xen(void)
6354
SYMBOL_INIT(dom_xen, "dom_xen");
6355
SYMBOL_INIT(dom_io, "dom_io");
6356
SYMBOL_INIT(domain_list, "domain_list");
6357
SYMBOL_INIT(frame_table, "frame_table");
6358
SYMBOL_INIT(alloc_bitmap, "alloc_bitmap");
6359
SYMBOL_INIT(max_page, "max_page");
6360
SYMBOL_INIT(xenheap_phys_end, "xenheap_phys_end");
6363
* Architecture specific
6365
SYMBOL_INIT(pgd_l2, "idle_pg_table_l2"); /* x86 */
6366
SYMBOL_INIT(pgd_l3, "idle_pg_table_l3"); /* x86-PAE */
6367
if (SYMBOL(pgd_l3) == NOT_FOUND_SYMBOL)
6368
SYMBOL_INIT(pgd_l3, "idle_pg_table"); /* x86-PAE */
6369
SYMBOL_INIT(pgd_l4, "idle_pg_table_4"); /* x86_64 */
6370
if (SYMBOL(pgd_l4) == NOT_FOUND_SYMBOL)
6371
SYMBOL_INIT(pgd_l4, "idle_pg_table"); /* x86_64 */
6373
SYMBOL_INIT(xen_heap_start, "xen_heap_start"); /* ia64 */
6374
SYMBOL_INIT(xen_pstart, "xen_pstart"); /* ia64 */
6375
SYMBOL_INIT(frametable_pg_dir, "frametable_pg_dir"); /* ia64 */
6381
get_structure_info_xen(void)
6383
SIZE_INIT(page_info, "page_info");
6384
OFFSET_INIT(page_info.count_info, "page_info", "count_info");
6385
OFFSET_INIT(page_info._domain, "page_info", "_domain");
6387
SIZE_INIT(domain, "domain");
6388
OFFSET_INIT(domain.domain_id, "domain", "domain_id");
6389
OFFSET_INIT(domain.next_in_list, "domain", "next_in_list");
6395
init_xen_crash_info(void)
6397
off_t offset_xen_crash_info;
6398
unsigned long size_xen_crash_info;
6401
get_xen_crash_info(&offset_xen_crash_info, &size_xen_crash_info);
6402
if (!size_xen_crash_info) {
6403
info->xen_crash_info_v = -1;
6404
return TRUE; /* missing info is non-fatal */
6407
if (size_xen_crash_info < sizeof(xen_crash_info_com_t)) {
6408
ERRMSG("Xen crash info too small (%lu bytes).\n",
6409
size_xen_crash_info);
6413
buf = malloc(size_xen_crash_info);
6415
ERRMSG("Can't allocate note (%lu bytes). %s\n",
6416
size_xen_crash_info, strerror(errno));
6420
if (lseek(info->fd_memory, offset_xen_crash_info, SEEK_SET) < 0) {
6421
ERRMSG("Can't seek the dump memory(%s). %s\n",
6422
info->name_memory, strerror(errno));
6425
if (read(info->fd_memory, buf, size_xen_crash_info)
6426
!= size_xen_crash_info) {
6427
ERRMSG("Can't read the dump memory(%s). %s\n",
6428
info->name_memory, strerror(errno));
6432
info->xen_crash_info.com = buf;
6433
if (size_xen_crash_info >= sizeof(xen_crash_info_v2_t))
6434
info->xen_crash_info_v = 2;
6435
else if (size_xen_crash_info >= sizeof(xen_crash_info_t))
6436
info->xen_crash_info_v = 1;
6438
info->xen_crash_info_v = 0;
6446
unsigned long domain;
6447
unsigned int domain_id;
6451
* Get architecture specific basic data
6453
if (!get_xen_basic_info_arch())
6456
if (!info->xen_crash_info.com ||
6457
info->xen_crash_info.com->xen_major_version < 4) {
6458
if (SYMBOL(alloc_bitmap) == NOT_FOUND_SYMBOL) {
6459
ERRMSG("Can't get the symbol of alloc_bitmap.\n");
6462
if (!readmem(VADDR_XEN, SYMBOL(alloc_bitmap), &info->alloc_bitmap,
6463
sizeof(info->alloc_bitmap))) {
6464
ERRMSG("Can't get the value of alloc_bitmap.\n");
6467
if (SYMBOL(max_page) == NOT_FOUND_SYMBOL) {
6468
ERRMSG("Can't get the symbol of max_page.\n");
6471
if (!readmem(VADDR_XEN, SYMBOL(max_page), &info->max_page,
6472
sizeof(info->max_page))) {
6473
ERRMSG("Can't get the value of max_page.\n");
6479
* Walk through domain_list
6481
if (SYMBOL(domain_list) == NOT_FOUND_SYMBOL) {
6482
ERRMSG("Can't get the symbol of domain_list.\n");
6485
if (!readmem(VADDR_XEN, SYMBOL(domain_list), &domain, sizeof(domain))){
6486
ERRMSG("Can't get the value of domain_list.\n");
6491
* Get numbers of domain first
6496
if (!readmem(VADDR_XEN, domain + OFFSET(domain.next_in_list),
6497
&domain, sizeof(domain))) {
6498
ERRMSG("Can't get through the domain_list.\n");
6503
if ((info->domain_list = (struct domain_list *)
6504
malloc(sizeof(struct domain_list) * (num_domain + 2))) == NULL) {
6505
ERRMSG("Can't allocate memory for domain_list.\n");
6509
info->num_domain = num_domain + 2;
6511
if (!readmem(VADDR_XEN, SYMBOL(domain_list), &domain, sizeof(domain))) {
6512
ERRMSG("Can't get the value of domain_list.\n");
6517
if (!readmem(VADDR_XEN, domain + OFFSET(domain.domain_id),
6518
&domain_id, sizeof(domain_id))) {
6519
ERRMSG("Can't get the domain_id.\n");
6522
info->domain_list[num_domain].domain_addr = domain;
6523
info->domain_list[num_domain].domain_id = domain_id;
6525
* pickled_id is set by architecture specific
6529
if (!readmem(VADDR_XEN, domain + OFFSET(domain.next_in_list),
6530
&domain, sizeof(domain))) {
6531
ERRMSG("Can't get through the domain_list.\n");
6539
if (SYMBOL(dom_xen) == NOT_FOUND_SYMBOL) {
6540
ERRMSG("Can't get the symbol of dom_xen.\n");
6543
if (!readmem(VADDR_XEN, SYMBOL(dom_xen), &domain, sizeof(domain))) {
6544
ERRMSG("Can't get the value of dom_xen.\n");
6547
if (!readmem(VADDR_XEN, domain + OFFSET(domain.domain_id), &domain_id,
6548
sizeof(domain_id))) {
6549
ERRMSG( "Can't get the value of dom_xen domain_id.\n");
6552
info->domain_list[num_domain].domain_addr = domain;
6553
info->domain_list[num_domain].domain_id = domain_id;
6556
if (SYMBOL(dom_io) == NOT_FOUND_SYMBOL) {
6557
ERRMSG("Can't get the symbol of dom_io.\n");
6560
if (!readmem(VADDR_XEN, SYMBOL(dom_io), &domain, sizeof(domain))) {
6561
ERRMSG("Can't get the value of dom_io.\n");
6564
if (!readmem(VADDR_XEN, domain + OFFSET(domain.domain_id), &domain_id,
6565
sizeof(domain_id))) {
6566
ERRMSG( "Can't get the value of dom_io domain_id.\n");
6569
info->domain_list[num_domain].domain_addr = domain;
6570
info->domain_list[num_domain].domain_id = domain_id;
6573
* Get architecture specific data
6575
if (!get_xen_info_arch())
6587
* Show data for debug
6590
MSG("SYMBOL(dom_xen): %llx\n", SYMBOL(dom_xen));
6591
MSG("SYMBOL(dom_io): %llx\n", SYMBOL(dom_io));
6592
MSG("SYMBOL(domain_list): %llx\n", SYMBOL(domain_list));
6593
MSG("SYMBOL(xen_heap_start): %llx\n", SYMBOL(xen_heap_start));
6594
MSG("SYMBOL(frame_table): %llx\n", SYMBOL(frame_table));
6595
MSG("SYMBOL(alloc_bitmap): %llx\n", SYMBOL(alloc_bitmap));
6596
MSG("SYMBOL(max_page): %llx\n", SYMBOL(max_page));
6597
MSG("SYMBOL(pgd_l2): %llx\n", SYMBOL(pgd_l2));
6598
MSG("SYMBOL(pgd_l3): %llx\n", SYMBOL(pgd_l3));
6599
MSG("SYMBOL(pgd_l4): %llx\n", SYMBOL(pgd_l4));
6600
MSG("SYMBOL(xenheap_phys_end): %llx\n", SYMBOL(xenheap_phys_end));
6601
MSG("SYMBOL(xen_pstart): %llx\n", SYMBOL(xen_pstart));
6602
MSG("SYMBOL(frametable_pg_dir): %llx\n", SYMBOL(frametable_pg_dir));
6604
MSG("SIZE(page_info): %ld\n", SIZE(page_info));
6605
MSG("OFFSET(page_info.count_info): %ld\n", OFFSET(page_info.count_info));
6606
MSG("OFFSET(page_info._domain): %ld\n", OFFSET(page_info._domain));
6607
MSG("SIZE(domain): %ld\n", SIZE(domain));
6608
MSG("OFFSET(domain.domain_id): %ld\n", OFFSET(domain.domain_id));
6609
MSG("OFFSET(domain.next_in_list): %ld\n", OFFSET(domain.next_in_list));
6612
if (info->xen_crash_info.com) {
6613
MSG("xen_major_version: %lx\n",
6614
info->xen_crash_info.com->xen_major_version);
6615
MSG("xen_minor_version: %lx\n",
6616
info->xen_crash_info.com->xen_minor_version);
6618
MSG("xen_phys_start: %lx\n", info->xen_phys_start);
6619
MSG("frame_table_vaddr: %lx\n", info->frame_table_vaddr);
6620
MSG("xen_heap_start: %lx\n", info->xen_heap_start);
6621
MSG("xen_heap_end:%lx\n", info->xen_heap_end);
6622
MSG("alloc_bitmap: %lx\n", info->alloc_bitmap);
6623
MSG("max_page: %lx\n", info->max_page);
6624
MSG("num_domain: %d\n", info->num_domain);
6625
for (i = 0; i < info->num_domain; i++) {
6626
MSG(" %u: %x: %lx\n", info->domain_list[i].domain_id,
6627
info->domain_list[i].pickled_id,
6628
info->domain_list[i].domain_addr);
6633
generate_vmcoreinfo_xen(void)
6635
if ((info->page_size = sysconf(_SC_PAGE_SIZE)) <= 0) {
6636
ERRMSG("Can't get the size of page.\n");
6639
set_dwarf_debuginfo("xen-syms", NULL,
6640
info->name_xen_syms, info->fd_xen_syms);
6642
if (!get_symbol_info_xen())
6645
if (!get_structure_info_xen())
6649
* write 1st kernel's PAGESIZE
6651
fprintf(info->file_vmcoreinfo, "%s%ld\n", STR_PAGESIZE,
6655
* write the symbol of 1st kernel
6657
WRITE_SYMBOL("dom_xen", dom_xen);
6658
WRITE_SYMBOL("dom_io", dom_io);
6659
WRITE_SYMBOL("domain_list", domain_list);
6660
WRITE_SYMBOL("xen_heap_start", xen_heap_start);
6661
WRITE_SYMBOL("frame_table", frame_table);
6662
WRITE_SYMBOL("alloc_bitmap", alloc_bitmap);
6663
WRITE_SYMBOL("max_page", max_page);
6664
WRITE_SYMBOL("pgd_l2", pgd_l2);
6665
WRITE_SYMBOL("pgd_l3", pgd_l3);
6666
WRITE_SYMBOL("pgd_l4", pgd_l4);
6667
WRITE_SYMBOL("xenheap_phys_end", xenheap_phys_end);
6668
WRITE_SYMBOL("xen_pstart", xen_pstart);
6669
WRITE_SYMBOL("frametable_pg_dir", frametable_pg_dir);
6672
* write the structure size of 1st kernel
6674
WRITE_STRUCTURE_SIZE("page_info", page_info);
6675
WRITE_STRUCTURE_SIZE("domain", domain);
6678
* write the member offset of 1st kernel
6680
WRITE_MEMBER_OFFSET("page_info.count_info", page_info.count_info);
6681
WRITE_MEMBER_OFFSET("page_info._domain", page_info._domain);
6682
WRITE_MEMBER_OFFSET("domain.domain_id", domain.domain_id);
6683
WRITE_MEMBER_OFFSET("domain.next_in_list", domain.next_in_list);
6689
read_vmcoreinfo_basic_info_xen(void)
6691
long page_size = FALSE;
6692
char buf[BUFSIZE_FGETS], *endp;
6695
if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
6696
ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
6697
info->name_vmcoreinfo, strerror(errno));
6701
while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
6705
if (buf[i - 1] == '\n')
6707
if (strncmp(buf, STR_PAGESIZE, strlen(STR_PAGESIZE)) == 0) {
6708
page_size = strtol(buf+strlen(STR_PAGESIZE),&endp,10);
6709
if ((!page_size || page_size == LONG_MAX)
6710
|| strlen(endp) != 0) {
6711
ERRMSG("Invalid data in %s: %s",
6712
info->name_vmcoreinfo, buf);
6715
if (!set_page_size(page_size)) {
6716
ERRMSG("Invalid data in %s: %s",
6717
info->name_vmcoreinfo, buf);
6723
if (!info->page_size) {
6724
ERRMSG("Invalid format in %s", info->name_vmcoreinfo);
6731
read_vmcoreinfo_xen(void)
6733
if (!read_vmcoreinfo_basic_info_xen())
6736
READ_SYMBOL("dom_xen", dom_xen);
6737
READ_SYMBOL("dom_io", dom_io);
6738
READ_SYMBOL("domain_list", domain_list);
6739
READ_SYMBOL("xen_heap_start", xen_heap_start);
6740
READ_SYMBOL("frame_table", frame_table);
6741
READ_SYMBOL("alloc_bitmap", alloc_bitmap);
6742
READ_SYMBOL("max_page", max_page);
6743
READ_SYMBOL("pgd_l2", pgd_l2);
6744
READ_SYMBOL("pgd_l3", pgd_l3);
6745
READ_SYMBOL("pgd_l4", pgd_l4);
6746
READ_SYMBOL("xenheap_phys_end", xenheap_phys_end);
6747
READ_SYMBOL("xen_pstart", xen_pstart);
6748
READ_SYMBOL("frametable_pg_dir", frametable_pg_dir);
6750
READ_STRUCTURE_SIZE("page_info", page_info);
6751
READ_STRUCTURE_SIZE("domain", domain);
6753
READ_MEMBER_OFFSET("page_info.count_info", page_info.count_info);
6754
READ_MEMBER_OFFSET("page_info._domain", page_info._domain);
6755
READ_MEMBER_OFFSET("domain.domain_id", domain.domain_id);
6756
READ_MEMBER_OFFSET("domain.next_in_list", domain.next_in_list);
6762
allocated_in_map(unsigned long long pfn)
6764
static unsigned long long cur_idx = -1;
6765
static unsigned long cur_word;
6766
unsigned long long idx;
6768
idx = pfn / PAGES_PER_MAPWORD;
6769
if (idx != cur_idx) {
6770
if (!readmem(VADDR_XEN,
6771
info->alloc_bitmap + idx * sizeof(unsigned long),
6772
&cur_word, sizeof(cur_word))) {
6773
ERRMSG("Can't access alloc_bitmap.\n");
6779
return !!(cur_word & (1UL << (pfn & (PAGES_PER_MAPWORD - 1))));
6783
is_select_domain(unsigned int id)
6787
/* selected domain is fix to dom0 only now !!
6788
(yes... domain_list is not necessary right now,
6789
it can get from "dom0" directly) */
6791
for (i = 0; i < info->num_domain; i++) {
6792
if (info->domain_list[i].domain_id == 0 &&
6793
info->domain_list[i].pickled_id == id)
6801
exclude_xen3_user_domain(void)
6804
unsigned int count_info, _domain;
6805
unsigned int num_pt_loads = get_num_pt_loads();
6806
unsigned long page_info_addr;
6807
unsigned long long phys_start, phys_end;
6808
unsigned long long pfn, pfn_end;
6809
unsigned long long j, size;
6812
* NOTE: the first half of bitmap is not used for Xen extraction
6814
for (i = 0; get_pt_load(i, &phys_start, &phys_end, NULL, NULL); i++) {
6816
print_progress(PROGRESS_XEN_DOMAIN, i, num_pt_loads);
6818
pfn = paddr_to_pfn(phys_start);
6819
pfn_end = paddr_to_pfn(phys_end);
6820
size = pfn_end - pfn;
6822
for (j = 0; pfn < pfn_end; pfn++, j++) {
6823
print_progress(PROGRESS_XEN_DOMAIN, j + (size * i),
6824
size * num_pt_loads);
6826
if (!allocated_in_map(pfn)) {
6827
clear_bit_on_2nd_bitmap(pfn);
6831
page_info_addr = info->frame_table_vaddr + pfn * SIZE(page_info);
6832
if (!readmem(VADDR_XEN,
6833
page_info_addr + OFFSET(page_info.count_info),
6834
&count_info, sizeof(count_info))) {
6835
clear_bit_on_2nd_bitmap(pfn);
6836
continue; /* page_info may not exist */
6838
if (!readmem(VADDR_XEN,
6839
page_info_addr + OFFSET(page_info._domain),
6840
&_domain, sizeof(_domain))) {
6841
ERRMSG("Can't get page_info._domain.\n");
6846
* - anonymous (_domain == 0), or
6847
* - xen heap area, or
6848
* - selected domain page
6852
if (info->xen_heap_start <= pfn && pfn < info->xen_heap_end)
6854
if ((count_info & 0xffff) && is_select_domain(_domain))
6856
clear_bit_on_2nd_bitmap(pfn);
6864
exclude_xen4_user_domain(void)
6867
unsigned long count_info;
6868
unsigned int _domain;
6869
unsigned int num_pt_loads = get_num_pt_loads();
6870
unsigned long page_info_addr;
6871
unsigned long long phys_start, phys_end;
6872
unsigned long long pfn, pfn_end;
6873
unsigned long long j, size;
6876
* NOTE: the first half of bitmap is not used for Xen extraction
6878
for (i = 0; get_pt_load(i, &phys_start, &phys_end, NULL, NULL); i++) {
6880
print_progress(PROGRESS_XEN_DOMAIN, i, num_pt_loads);
6882
pfn = paddr_to_pfn(phys_start);
6883
pfn_end = paddr_to_pfn(phys_end);
6884
size = pfn_end - pfn;
6886
for (j = 0; pfn < pfn_end; pfn++, j++) {
6887
print_progress(PROGRESS_XEN_DOMAIN, j + (size * i),
6888
size * num_pt_loads);
6890
page_info_addr = info->frame_table_vaddr + pfn * SIZE(page_info);
6891
if (!readmem(VADDR_XEN,
6892
page_info_addr + OFFSET(page_info.count_info),
6893
&count_info, sizeof(count_info))) {
6894
clear_bit_on_2nd_bitmap(pfn);
6895
continue; /* page_info may not exist */
6898
/* always keep Xen heap pages */
6899
if (count_info & PGC_xen_heap)
6902
/* delete free, offlined and broken pages */
6903
if (page_state_is(count_info, free) ||
6904
page_state_is(count_info, offlined) ||
6905
count_info & PGC_broken) {
6906
clear_bit_on_2nd_bitmap(pfn);
6910
/* keep inuse pages not allocated to any domain
6911
* this covers e.g. Xen static data
6913
if (! (count_info & PGC_allocated))
6916
/* Need to check the domain
6918
* - anonymous (_domain == 0), or
6919
* - selected domain page
6921
if (!readmem(VADDR_XEN,
6922
page_info_addr + OFFSET(page_info._domain),
6923
&_domain, sizeof(_domain))) {
6924
ERRMSG("Can't get page_info._domain.\n");
6930
if (is_select_domain(_domain))
6932
clear_bit_on_2nd_bitmap(pfn);
6940
exclude_xen_user_domain(void)
6942
struct timeval tv_start;
6945
gettimeofday(&tv_start, NULL);
6947
if (info->xen_crash_info.com &&
6948
info->xen_crash_info.com->xen_major_version >= 4)
6949
ret = exclude_xen4_user_domain();
6951
ret = exclude_xen3_user_domain();
6956
print_progress(PROGRESS_XEN_DOMAIN, 1, 1);
6957
print_execution_time(PROGRESS_XEN_DOMAIN, &tv_start);
6968
#if defined(__powerpc64__) || defined(__powerpc32__)
6970
MSG("Xen is not supported on powerpc.\n");
6973
if(!info->flag_elf_dumpfile) {
6974
MSG("Specify '-E' option for Xen.\n");
6975
MSG("Commandline parameter is invalid.\n");
6976
MSG("Try `makedumpfile --help' for more information.\n");
6980
if (DL_EXCLUDE_ZERO < info->max_dump_level) {
6981
MSG("Dump_level is invalid. It should be 0 or 1.\n");
6982
MSG("Commandline parameter is invalid.\n");
6983
MSG("Try `makedumpfile --help' for more information.\n");
6987
if (!init_xen_crash_info())
6989
if (!fallback_to_current_page_size())
6992
* Get the debug information for analysis from the vmcoreinfo file
6994
if (info->flag_read_vmcoreinfo) {
6995
if (!read_vmcoreinfo_xen())
6999
* Get the debug information for analysis from the xen-syms file
7001
} else if (info->name_xen_syms) {
7002
set_dwarf_debuginfo("xen-syms", NULL,
7003
info->name_xen_syms, info->fd_xen_syms);
7005
if (!get_symbol_info_xen())
7007
if (!get_structure_info_xen())
7010
* Get the debug information for analysis from /proc/vmcore
7014
* Check whether /proc/vmcore contains vmcoreinfo,
7015
* and get both the offset and the size.
7017
if (!has_vmcoreinfo_xen()){
7018
if (!info->flag_exclude_xen_dom)
7021
MSG("%s doesn't contain a vmcoreinfo for Xen.\n",
7023
MSG("Specify '--xen-syms' option or '--xen-vmcoreinfo' option.\n");
7024
MSG("Commandline parameter is invalid.\n");
7025
MSG("Try `makedumpfile --help' for more information.\n");
7029
* Get the debug information from /proc/vmcore
7031
get_vmcoreinfo_xen(&offset, &size);
7032
if (!read_vmcoreinfo_from_vmcore(offset, size, TRUE))
7035
if (!get_xen_info())
7038
if (message_level & ML_PRINT_DEBUG_MSG)
7041
if (!get_max_mapnr())
7051
unsigned long long paddr;
7053
if (!info->vaddr_for_vtop)
7057
MSG("Translating virtual address %lx to physical address.\n", info->vaddr_for_vtop);
7059
paddr = vaddr_to_paddr(info->vaddr_for_vtop);
7061
MSG("VIRTUAL PHYSICAL\n");
7062
MSG("%16lx %llx\n", info->vaddr_for_vtop, paddr);
7065
info->vaddr_for_vtop = 0;
7073
unsigned long long pfn_original, pfn_excluded, shrinking;
7076
* /proc/vmcore doesn't contain the memory hole area.
7078
pfn_original = info->max_mapnr - pfn_memhole;
7080
pfn_excluded = pfn_zero + pfn_cache + pfn_cache_private
7081
+ pfn_user + pfn_free;
7082
shrinking = (pfn_original - pfn_excluded) * 100;
7083
shrinking = shrinking / pfn_original;
7086
REPORT_MSG("Original pages : 0x%016llx\n", pfn_original);
7087
REPORT_MSG(" Excluded pages : 0x%016llx\n", pfn_excluded);
7088
REPORT_MSG(" Pages filled with zero : 0x%016llx\n", pfn_zero);
7089
REPORT_MSG(" Cache pages : 0x%016llx\n", pfn_cache);
7090
REPORT_MSG(" Cache pages + private : 0x%016llx\n",
7092
REPORT_MSG(" User process data pages : 0x%016llx\n", pfn_user);
7093
REPORT_MSG(" Free pages : 0x%016llx\n", pfn_free);
7094
REPORT_MSG(" Remaining pages : 0x%016llx\n",
7095
pfn_original - pfn_excluded);
7096
REPORT_MSG(" (The number of pages is reduced to %lld%%.)\n",
7098
REPORT_MSG("Memory Hole : 0x%016llx\n", pfn_memhole);
7099
REPORT_MSG("--------------------------------------------------\n");
7100
REPORT_MSG("Total pages : 0x%016llx\n", info->max_mapnr);
7105
writeout_dumpfile(void)
7108
struct cache_data cd_header, cd_page;
7110
info->flag_nospace = FALSE;
7112
if (!open_dump_file())
7115
if (info->flag_flatten) {
7116
if (!write_start_flat_header())
7119
if (!prepare_cache_data(&cd_header))
7122
if (!prepare_cache_data(&cd_page)) {
7123
free_cache_data(&cd_header);
7126
if (info->flag_elf_dumpfile) {
7127
if (!write_elf_header(&cd_header))
7129
if (info->flag_cyclic) {
7130
if (!write_elf_pages_cyclic(&cd_header, &cd_page))
7133
if (!write_elf_pages(&cd_header, &cd_page))
7136
if (!write_elf_eraseinfo(&cd_header))
7138
} else if (info->flag_cyclic) {
7139
if (!write_kdump_header())
7141
if (!write_kdump_pages_and_bitmap_cyclic(&cd_header, &cd_page))
7143
if (!write_kdump_eraseinfo(&cd_page))
7146
if (!write_kdump_header())
7148
if (!write_kdump_pages(&cd_header, &cd_page))
7150
if (!write_kdump_eraseinfo(&cd_page))
7152
if (!write_kdump_bitmap())
7155
if (info->flag_flatten) {
7156
if (!write_end_flat_header())
7162
free_cache_data(&cd_header);
7163
free_cache_data(&cd_page);
7167
if ((ret == FALSE) && info->flag_nospace)
7174
setup_splitting(void)
7177
unsigned long long j, pfn_per_dumpfile;
7178
unsigned long long start_pfn, end_pfn;
7179
unsigned long long num_dumpable = get_num_dumpable();
7180
struct dump_bitmap bitmap2;
7182
if (info->num_dumpfile <= 1)
7185
if (info->flag_cyclic) {
7186
for (i = 0; i < info->num_dumpfile; i++) {
7187
SPLITTING_START_PFN(i) = divideup(info->max_mapnr, info->num_dumpfile) * i;
7188
SPLITTING_END_PFN(i) = divideup(info->max_mapnr, info->num_dumpfile) * (i + 1);
7190
if (SPLITTING_END_PFN(i-1) > info->max_mapnr)
7191
SPLITTING_END_PFN(i-1) = info->max_mapnr;
7193
initialize_2nd_bitmap(&bitmap2);
7195
pfn_per_dumpfile = num_dumpable / info->num_dumpfile;
7196
start_pfn = end_pfn = 0;
7197
for (i = 0; i < info->num_dumpfile; i++) {
7198
start_pfn = end_pfn;
7199
if (i == (info->num_dumpfile - 1)) {
7200
end_pfn = info->max_mapnr;
7202
for (j = 0; j < pfn_per_dumpfile; end_pfn++) {
7203
if (is_dumpable(&bitmap2, end_pfn))
7207
SPLITTING_START_PFN(i) = start_pfn;
7208
SPLITTING_END_PFN(i) = end_pfn;
7216
* This function is for creating split dumpfiles by multiple
7217
* processes. Each child process should re-open a /proc/vmcore
7218
* file, because it prevents each other from affectting the file
7219
* offset due to read(2) call.
7222
reopen_dump_memory()
7224
close_dump_memory();
7226
if ((info->fd_memory = open(info->name_memory, O_RDONLY)) < 0) {
7227
ERRMSG("Can't open the dump memory(%s). %s\n",
7228
info->name_memory, strerror(errno));
7235
get_next_dump_level(int index)
7237
if (info->num_dump_level <= index)
7240
return info->array_dump_level[index];
7244
delete_dumpfile(void)
7248
if (info->flag_flatten)
7251
if (info->flag_split) {
7252
for (i = 0; i < info->num_dumpfile; i++)
7253
unlink(SPLITTING_DUMPFILE(i));
7255
unlink(info->name_dumpfile);
7261
writeout_multiple_dumpfiles(void)
7263
int i, status, ret = TRUE;
7265
pid_t array_pid[info->num_dumpfile];
7267
if (!setup_splitting())
7270
for (i = 0; i < info->num_dumpfile; i++) {
7271
if ((pid = fork()) < 0) {
7274
} else if (pid == 0) { /* Child */
7275
info->name_dumpfile = SPLITTING_DUMPFILE(i);
7276
info->fd_bitmap = SPLITTING_FD_BITMAP(i);
7277
info->split_start_pfn = SPLITTING_START_PFN(i);
7278
info->split_end_pfn = SPLITTING_END_PFN(i);
7280
if (!reopen_dump_memory())
7282
if ((status = writeout_dumpfile()) == FALSE)
7284
else if (status == NOSPACE)
7290
for (i = 0; i < info->num_dumpfile; i++) {
7291
waitpid(array_pid[i], &status, WUNTRACED);
7292
if (!WIFEXITED(status) || WEXITSTATUS(status) == 1) {
7293
ERRMSG("Child process(%d) finished imcompletely.(%d)\n",
7294
array_pid[i], status);
7296
} else if ((ret == TRUE) && (WEXITSTATUS(status) == 2))
7303
create_dumpfile(void)
7305
int num_retry, status, new_level;
7307
if (!open_files_for_creating_dumpfile())
7310
if (!info->flag_refiltering && !info->flag_sadump) {
7311
if (!get_elf_info(info->fd_memory, info->name_memory))
7321
if (info->flag_refiltering) {
7322
/* Change dump level */
7323
new_level = info->dump_level | info->kh_memory->dump_level;
7324
if (new_level != info->dump_level) {
7325
info->dump_level = new_level;
7326
MSG("dump_level is changed to %d, " \
7327
"because %s was created by dump_level(%d).",
7328
new_level, info->name_memory,
7329
info->kh_memory->dump_level);
7333
if (info->name_filterconfig && !gather_filter_info())
7336
if (!create_dump_bitmap())
7339
if (info->flag_split) {
7340
if ((status = writeout_multiple_dumpfiles()) == FALSE)
7343
if ((status = writeout_dumpfile()) == FALSE)
7346
if (status == NOSPACE) {
7348
* If specifying the other dump_level, makedumpfile tries
7349
* to create a dumpfile with it again.
7352
if ((info->dump_level = get_next_dump_level(num_retry)) < 0)
7354
MSG("Retry to create a dumpfile by dump_level(%d).\n",
7356
if (!delete_dumpfile())
7362
clear_filter_info();
7363
if (!close_files_for_creating_dumpfile())
7370
__read_disk_dump_header(struct disk_dump_header *dh, char *filename)
7372
int fd, ret = FALSE;
7374
if ((fd = open(filename, O_RDONLY)) < 0) {
7375
ERRMSG("Can't open a file(%s). %s\n",
7376
filename, strerror(errno));
7379
if (lseek(fd, 0x0, SEEK_SET) < 0) {
7380
ERRMSG("Can't seek a file(%s). %s\n",
7381
filename, strerror(errno));
7384
if (read(fd, dh, sizeof(struct disk_dump_header))
7385
!= sizeof(struct disk_dump_header)) {
7386
ERRMSG("Can't read a file(%s). %s\n",
7387
filename, strerror(errno));
7398
read_disk_dump_header(struct disk_dump_header *dh, char *filename)
7400
if (!__read_disk_dump_header(dh, filename))
7403
if (strncmp(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE))) {
7404
ERRMSG("%s is not the kdump-compressed format.\n",
7412
read_kdump_sub_header(struct kdump_sub_header *kh, char *filename)
7414
int fd, ret = FALSE;
7415
struct disk_dump_header dh;
7418
if (!read_disk_dump_header(&dh, filename))
7421
offset = DISKDUMP_HEADER_BLOCKS * dh.block_size;
7423
if ((fd = open(filename, O_RDONLY)) < 0) {
7424
ERRMSG("Can't open a file(%s). %s\n",
7425
filename, strerror(errno));
7428
if (lseek(fd, offset, SEEK_SET) < 0) {
7429
ERRMSG("Can't seek a file(%s). %s\n",
7430
filename, strerror(errno));
7433
if (read(fd, kh, sizeof(struct kdump_sub_header))
7434
!= sizeof(struct kdump_sub_header)) {
7435
ERRMSG("Can't read a file(%s). %s\n",
7436
filename, strerror(errno));
7447
store_splitting_info(void)
7450
struct disk_dump_header dh, tmp_dh;
7451
struct kdump_sub_header kh;
7453
for (i = 0; i < info->num_dumpfile; i++) {
7454
if (!read_disk_dump_header(&tmp_dh, SPLITTING_DUMPFILE(i)))
7458
memcpy(&dh, &tmp_dh, sizeof(tmp_dh));
7459
info->max_mapnr = dh.max_mapnr;
7460
if (!set_page_size(dh.block_size))
7462
DEBUG_MSG("max_mapnr : %llx\n", info->max_mapnr);
7463
DEBUG_MSG("page_size : %ld\n", info->page_size);
7467
* Check whether multiple dumpfiles are parts of
7468
* the same /proc/vmcore.
7470
if (memcmp(&dh, &tmp_dh, sizeof(tmp_dh))) {
7471
ERRMSG("Invalid dumpfile(%s).\n",
7472
SPLITTING_DUMPFILE(i));
7475
if (!read_kdump_sub_header(&kh, SPLITTING_DUMPFILE(i)))
7479
info->dump_level = kh.dump_level;
7480
DEBUG_MSG("dump_level : %d\n", info->dump_level);
7482
SPLITTING_START_PFN(i) = kh.start_pfn;
7483
SPLITTING_END_PFN(i) = kh.end_pfn;
7484
SPLITTING_OFFSET_EI(i) = kh.offset_eraseinfo;
7485
SPLITTING_SIZE_EI(i) = kh.size_eraseinfo;
7491
sort_splitting_info(void)
7494
unsigned long long start_pfn, end_pfn;
7495
char *name_dumpfile;
7498
* Sort splitting_info by start_pfn.
7500
for (i = 0; i < (info->num_dumpfile - 1); i++) {
7501
for (j = i; j < info->num_dumpfile; j++) {
7502
if (SPLITTING_START_PFN(i) < SPLITTING_START_PFN(j))
7504
start_pfn = SPLITTING_START_PFN(i);
7505
end_pfn = SPLITTING_END_PFN(i);
7506
name_dumpfile = SPLITTING_DUMPFILE(i);
7508
SPLITTING_START_PFN(i) = SPLITTING_START_PFN(j);
7509
SPLITTING_END_PFN(i) = SPLITTING_END_PFN(j);
7510
SPLITTING_DUMPFILE(i) = SPLITTING_DUMPFILE(j);
7512
SPLITTING_START_PFN(j) = start_pfn;
7513
SPLITTING_END_PFN(j) = end_pfn;
7514
SPLITTING_DUMPFILE(j) = name_dumpfile;
7518
DEBUG_MSG("num_dumpfile : %d\n", info->num_dumpfile);
7519
for (i = 0; i < info->num_dumpfile; i++) {
7520
DEBUG_MSG("dumpfile (%s)\n", SPLITTING_DUMPFILE(i));
7521
DEBUG_MSG(" start_pfn : %llx\n", SPLITTING_START_PFN(i));
7522
DEBUG_MSG(" end_pfn : %llx\n", SPLITTING_END_PFN(i));
7527
check_splitting_info(void)
7530
unsigned long long end_pfn;
7533
* Check whether there are not lack of /proc/vmcore.
7535
if (SPLITTING_START_PFN(0) != 0) {
7536
ERRMSG("There is not dumpfile corresponding to pfn 0x%x - 0x%llx.\n",
7537
0x0, SPLITTING_START_PFN(0));
7540
end_pfn = SPLITTING_END_PFN(0);
7542
for (i = 1; i < info->num_dumpfile; i++) {
7543
if (end_pfn != SPLITTING_START_PFN(i)) {
7544
ERRMSG("There is not dumpfile corresponding to pfn 0x%llx - 0x%llx.\n",
7545
end_pfn, SPLITTING_START_PFN(i));
7548
end_pfn = SPLITTING_END_PFN(i);
7550
if (end_pfn != info->max_mapnr) {
7551
ERRMSG("There is not dumpfile corresponding to pfn 0x%llx - 0x%llx.\n",
7552
end_pfn, info->max_mapnr);
7560
get_splitting_info(void)
7562
if (!store_splitting_info())
7565
sort_splitting_info();
7567
if (!check_splitting_info())
7570
if (!get_kdump_compressed_header_info(SPLITTING_DUMPFILE(0)))
7577
copy_same_data(int src_fd, int dst_fd, off_t offset, unsigned long size)
7582
if ((buf = malloc(size)) == NULL) {
7583
ERRMSG("Can't allocate memory.\n");
7586
if (lseek(src_fd, offset, SEEK_SET) < 0) {
7587
ERRMSG("Can't seek a source file. %s\n", strerror(errno));
7590
if (read(src_fd, buf, size) != size) {
7591
ERRMSG("Can't read a source file. %s\n", strerror(errno));
7594
if (lseek(dst_fd, offset, SEEK_SET) < 0) {
7595
ERRMSG("Can't seek a destination file. %s\n", strerror(errno));
7598
if (write(dst_fd, buf, size) != size) {
7599
ERRMSG("Can't write a destination file. %s\n", strerror(errno));
7609
reassemble_kdump_header(void)
7611
int fd = -1, ret = FALSE;
7614
struct disk_dump_header dh;
7615
struct kdump_sub_header kh;
7616
char *buf_bitmap = NULL;
7619
* Write common header.
7621
if (!read_disk_dump_header(&dh, SPLITTING_DUMPFILE(0)))
7624
if (lseek(info->fd_dumpfile, 0x0, SEEK_SET) < 0) {
7625
ERRMSG("Can't seek a file(%s). %s\n",
7626
info->name_dumpfile, strerror(errno));
7629
if (write(info->fd_dumpfile, &dh, sizeof(dh)) != sizeof(dh)) {
7630
ERRMSG("Can't write a file(%s). %s\n",
7631
info->name_dumpfile, strerror(errno));
7638
if (!read_kdump_sub_header(&kh, SPLITTING_DUMPFILE(0)))
7645
if (lseek(info->fd_dumpfile, info->page_size, SEEK_SET) < 0) {
7646
ERRMSG("Can't seek a file(%s). %s\n",
7647
info->name_dumpfile, strerror(errno));
7650
if (write(info->fd_dumpfile, &kh, sizeof(kh)) != sizeof(kh)) {
7651
ERRMSG("Can't write a file(%s). %s\n",
7652
info->name_dumpfile, strerror(errno));
7655
memcpy(&info->sub_header, &kh, sizeof(kh));
7657
if ((fd = open(SPLITTING_DUMPFILE(0), O_RDONLY)) < 0) {
7658
ERRMSG("Can't open a file(%s). %s\n",
7659
SPLITTING_DUMPFILE(0), strerror(errno));
7662
if (has_pt_note()) {
7663
get_pt_note(&offset, &size);
7664
if (!copy_same_data(fd, info->fd_dumpfile, offset, size)) {
7665
ERRMSG("Can't copy pt_note data to %s.\n",
7666
info->name_dumpfile);
7670
if (has_vmcoreinfo()) {
7671
get_vmcoreinfo(&offset, &size);
7672
if (!copy_same_data(fd, info->fd_dumpfile, offset, size)) {
7673
ERRMSG("Can't copy vmcoreinfo data to %s.\n",
7674
info->name_dumpfile);
7680
* Write dump bitmap to both a dumpfile and a bitmap file.
7682
offset = (DISKDUMP_HEADER_BLOCKS + dh.sub_hdr_size) * dh.block_size;
7683
info->len_bitmap = dh.bitmap_blocks * dh.block_size;
7684
if ((buf_bitmap = malloc(info->len_bitmap)) == NULL) {
7685
ERRMSG("Can't allocate memory for bitmap.\n");
7688
if (lseek(fd, offset, SEEK_SET) < 0) {
7689
ERRMSG("Can't seek a file(%s). %s\n",
7690
SPLITTING_DUMPFILE(0), strerror(errno));
7693
if (read(fd, buf_bitmap, info->len_bitmap) != info->len_bitmap) {
7694
ERRMSG("Can't read a file(%s). %s\n",
7695
SPLITTING_DUMPFILE(0), strerror(errno));
7699
if (lseek(info->fd_dumpfile, offset, SEEK_SET) < 0) {
7700
ERRMSG("Can't seek a file(%s). %s\n",
7701
info->name_dumpfile, strerror(errno));
7704
if (write(info->fd_dumpfile, buf_bitmap, info->len_bitmap)
7705
!= info->len_bitmap) {
7706
ERRMSG("Can't write a file(%s). %s\n",
7707
info->name_dumpfile, strerror(errno));
7711
if (lseek(info->fd_bitmap, 0x0, SEEK_SET) < 0) {
7712
ERRMSG("Can't seek a file(%s). %s\n",
7713
info->name_bitmap, strerror(errno));
7716
if (write(info->fd_bitmap, buf_bitmap, info->len_bitmap)
7717
!= info->len_bitmap) {
7718
ERRMSG("Can't write a file(%s). %s\n",
7719
info->name_bitmap, strerror(errno));
7733
reassemble_kdump_pages(void)
7735
int i, fd = 0, ret = FALSE;
7736
off_t offset_first_ph, offset_ph_org, offset_eraseinfo;
7737
off_t offset_data_new, offset_zero_page = 0;
7738
unsigned long long pfn, start_pfn, end_pfn;
7739
unsigned long long num_dumpable;
7740
unsigned long size_eraseinfo;
7741
struct dump_bitmap bitmap2;
7742
struct disk_dump_header dh;
7743
struct page_desc pd, pd_zero;
7744
struct cache_data cd_pd, cd_data;
7745
struct timeval tv_start;
7747
unsigned long data_buf_size = info->page_size;
7749
initialize_2nd_bitmap(&bitmap2);
7751
if (!read_disk_dump_header(&dh, SPLITTING_DUMPFILE(0)))
7754
if (!prepare_cache_data(&cd_pd))
7757
if (!prepare_cache_data(&cd_data)) {
7758
free_cache_data(&cd_pd);
7761
if ((data = malloc(data_buf_size)) == NULL) {
7762
ERRMSG("Can't allocate memory for page data.\n");
7763
free_cache_data(&cd_pd);
7764
free_cache_data(&cd_data);
7767
num_dumpable = get_num_dumpable();
7771
= (DISKDUMP_HEADER_BLOCKS + dh.sub_hdr_size + dh.bitmap_blocks)
7773
cd_pd.offset = offset_first_ph;
7774
offset_data_new = offset_first_ph + sizeof(page_desc_t) * num_dumpable;
7775
cd_data.offset = offset_data_new;
7778
* Write page header of zero-filled page.
7780
gettimeofday(&tv_start, NULL);
7781
if (info->dump_level & DL_EXCLUDE_ZERO) {
7783
* makedumpfile outputs the data of zero-filled page at first
7784
* if excluding zero-filled page, so the offset of first data
7785
* is for zero-filled page in all dumpfiles.
7787
offset_zero_page = offset_data_new;
7789
pd_zero.size = info->page_size;
7791
pd_zero.offset = offset_data_new;
7792
pd_zero.page_flags = 0;
7793
memset(data, 0, pd_zero.size);
7794
if (!write_cache(&cd_data, data, pd_zero.size))
7796
offset_data_new += pd_zero.size;
7799
for (i = 0; i < info->num_dumpfile; i++) {
7800
if ((fd = open(SPLITTING_DUMPFILE(i), O_RDONLY)) < 0) {
7801
ERRMSG("Can't open a file(%s). %s\n",
7802
SPLITTING_DUMPFILE(i), strerror(errno));
7805
start_pfn = SPLITTING_START_PFN(i);
7806
end_pfn = SPLITTING_END_PFN(i);
7808
offset_ph_org = offset_first_ph;
7809
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
7810
if (!is_dumpable(&bitmap2, pfn))
7815
print_progress(PROGRESS_COPY, num_dumped, num_dumpable);
7817
if (lseek(fd, offset_ph_org, SEEK_SET) < 0) {
7818
ERRMSG("Can't seek a file(%s). %s\n",
7819
SPLITTING_DUMPFILE(i), strerror(errno));
7822
if (read(fd, &pd, sizeof(pd)) != sizeof(pd)) {
7823
ERRMSG("Can't read a file(%s). %s\n",
7824
SPLITTING_DUMPFILE(i), strerror(errno));
7827
if (lseek(fd, pd.offset, SEEK_SET) < 0) {
7828
ERRMSG("Can't seek a file(%s). %s\n",
7829
SPLITTING_DUMPFILE(i), strerror(errno));
7832
if (read(fd, data, pd.size) != pd.size) {
7833
ERRMSG("Can't read a file(%s). %s\n",
7834
SPLITTING_DUMPFILE(i), strerror(errno));
7837
if ((info->dump_level & DL_EXCLUDE_ZERO)
7838
&& (pd.offset == offset_zero_page)) {
7840
* Handle the data of zero-filled page.
7842
if (!write_cache(&cd_pd, &pd_zero,
7845
offset_ph_org += sizeof(pd);
7848
pd.offset = offset_data_new;
7849
if (!write_cache(&cd_pd, &pd, sizeof(pd)))
7851
offset_ph_org += sizeof(pd);
7853
if (!write_cache(&cd_data, data, pd.size))
7856
offset_data_new += pd.size;
7861
if (!write_cache_bufsz(&cd_pd))
7863
if (!write_cache_bufsz(&cd_data))
7866
offset_eraseinfo = cd_data.offset;
7868
/* Copy eraseinfo from split dumpfiles to o/p dumpfile */
7869
for (i = 0; i < info->num_dumpfile; i++) {
7870
if (!SPLITTING_SIZE_EI(i))
7873
if (SPLITTING_SIZE_EI(i) > data_buf_size) {
7874
data_buf_size = SPLITTING_SIZE_EI(i);
7875
if ((data = realloc(data, data_buf_size)) == NULL) {
7876
ERRMSG("Can't allocate memory for eraseinfo"
7881
if ((fd = open(SPLITTING_DUMPFILE(i), O_RDONLY)) < 0) {
7882
ERRMSG("Can't open a file(%s). %s\n",
7883
SPLITTING_DUMPFILE(i), strerror(errno));
7886
if (lseek(fd, SPLITTING_OFFSET_EI(i), SEEK_SET) < 0) {
7887
ERRMSG("Can't seek a file(%s). %s\n",
7888
SPLITTING_DUMPFILE(i), strerror(errno));
7891
if (read(fd, data, SPLITTING_SIZE_EI(i)) !=
7892
SPLITTING_SIZE_EI(i)) {
7893
ERRMSG("Can't read a file(%s). %s\n",
7894
SPLITTING_DUMPFILE(i), strerror(errno));
7897
if (!write_cache(&cd_data, data, SPLITTING_SIZE_EI(i)))
7899
size_eraseinfo += SPLITTING_SIZE_EI(i);
7904
if (size_eraseinfo) {
7905
if (!write_cache_bufsz(&cd_data))
7908
if (!update_eraseinfo_of_sub_header(offset_eraseinfo,
7912
print_progress(PROGRESS_COPY, num_dumpable, num_dumpable);
7913
print_execution_time(PROGRESS_COPY, &tv_start);
7917
free_cache_data(&cd_pd);
7918
free_cache_data(&cd_data);
7929
reassemble_dumpfile(void)
7931
if (!get_splitting_info())
7934
if (!open_dump_bitmap())
7937
if (!open_dump_file())
7940
if (!reassemble_kdump_header())
7943
if (!reassemble_kdump_pages())
7947
close_dump_bitmap();
7953
check_param_for_generating_vmcoreinfo(int argc, char *argv[])
7958
if (info->flag_compress || info->dump_level
7959
|| info->flag_elf_dumpfile || info->flag_read_vmcoreinfo
7960
|| info->flag_flatten || info->flag_rearrange
7961
|| info->flag_exclude_xen_dom
7962
|| (!info->name_vmlinux && !info->name_xen_syms))
7970
* Parameters for creating dumpfile from the dump data
7971
* of flattened format by rearranging the dump data.
7974
check_param_for_rearranging_dumpdata(int argc, char *argv[])
7976
if (argc != optind + 1)
7979
if (info->flag_compress || info->dump_level
7980
|| info->flag_elf_dumpfile || info->flag_read_vmcoreinfo
7981
|| info->name_vmlinux || info->name_xen_syms
7982
|| info->flag_flatten || info->flag_generate_vmcoreinfo
7983
|| info->flag_exclude_xen_dom)
7986
info->name_dumpfile = argv[optind];
7991
* Parameters for reassembling multiple dumpfiles into one dumpfile.
7994
check_param_for_reassembling_dumpfile(int argc, char *argv[])
7998
info->num_dumpfile = argc - optind - 1;
7999
info->name_dumpfile = argv[argc - 1];
8001
DEBUG_MSG("num_dumpfile : %d\n", info->num_dumpfile);
8003
if (info->flag_compress || info->dump_level
8004
|| info->flag_elf_dumpfile || info->flag_read_vmcoreinfo
8005
|| info->name_vmlinux || info->name_xen_syms
8006
|| info->flag_flatten || info->flag_generate_vmcoreinfo
8007
|| info->flag_exclude_xen_dom || info->flag_split)
8010
if ((info->splitting_info
8011
= malloc(sizeof(splitting_info_t) * info->num_dumpfile))
8013
MSG("Can't allocate memory for splitting_info.\n");
8016
for (i = 0; i < info->num_dumpfile; i++)
8017
SPLITTING_DUMPFILE(i) = argv[optind + i];
8023
* Check parameters to create the dump file.
8026
check_param_for_creating_dumpfile(int argc, char *argv[])
8030
if (info->flag_generate_vmcoreinfo || info->flag_rearrange)
8033
if ((message_level < MIN_MSG_LEVEL)
8034
|| (MAX_MSG_LEVEL < message_level)) {
8035
message_level = DEFAULT_MSG_LEVEL;
8036
MSG("Message_level is invalid.\n");
8039
if ((info->flag_compress && info->flag_elf_dumpfile)
8040
|| (info->flag_read_vmcoreinfo && info->name_vmlinux)
8041
|| (info->flag_read_vmcoreinfo && info->name_xen_syms))
8044
if (info->flag_flatten && info->flag_split)
8047
if (info->name_filterconfig && !info->name_vmlinux)
8050
if (info->flag_sadump_diskset && !sadump_is_supported_arch())
8053
if ((argc == optind + 2) && !info->flag_flatten
8054
&& !info->flag_split
8055
&& !info->flag_sadump_diskset) {
8057
* Parameters for creating the dumpfile from vmcore.
8059
info->name_memory = argv[optind];
8060
info->name_dumpfile = argv[optind+1];
8062
} else if (info->flag_split && (info->flag_sadump_diskset
8063
? (argc >= optind + 2)
8064
: (argc > optind + 2))) {
8068
* Parameters for creating multiple dumpfiles from vmcore.
8070
if (info->flag_sadump_diskset) {
8072
info->name_memory = sadump_head_disk_name_memory();
8075
info->name_memory = argv[optind];
8077
info->num_dumpfile = argc - optind - num_vmcore;
8079
if (info->flag_elf_dumpfile) {
8080
MSG("Options for splitting dumpfile cannot be used with Elf format.\n");
8083
if ((info->splitting_info
8084
= malloc(sizeof(splitting_info_t) * info->num_dumpfile))
8086
MSG("Can't allocate memory for splitting_info.\n");
8089
for (i = 0; i < info->num_dumpfile; i++)
8090
SPLITTING_DUMPFILE(i) = argv[optind + num_vmcore + i];
8092
} else if ((argc == optind + 1) && !info->flag_split
8093
&& info->flag_sadump_diskset) {
8094
info->name_dumpfile = argv[optind];
8095
info->name_memory = sadump_head_disk_name_memory();
8097
DEBUG_MSG("name_dumpfile: %s\n", info->name_dumpfile);
8098
DEBUG_MSG("name_memory: %s\n", info->name_memory);
8100
} else if ((argc == optind + 1) && info->flag_flatten) {
8102
* Parameters for outputting the dump data of the
8103
* flattened format to STDOUT.
8105
info->name_memory = argv[optind];
8114
parse_dump_level(char *str_dump_level)
8119
if (!(buf = strdup(str_dump_level))) {
8120
MSG("Can't duplicate strings(%s).\n", str_dump_level);
8123
info->max_dump_level = 0;
8124
info->num_dump_level = 0;
8127
ptr = strtok(ptr, ",");
8132
if ((i < MIN_DUMP_LEVEL) || (MAX_DUMP_LEVEL < i)) {
8133
MSG("Dump_level(%d) is invalid.\n", i);
8136
if (NUM_ARRAY_DUMP_LEVEL <= info->num_dump_level) {
8137
MSG("Dump_level is invalid.\n");
8140
if (info->max_dump_level < i)
8141
info->max_dump_level = i;
8142
if (info->num_dump_level == 0)
8143
info->dump_level = i;
8144
info->array_dump_level[info->num_dump_level] = i;
8145
info->num_dump_level++;
8156
* Get the amount of free memory from /proc/meminfo.
8159
get_free_memory_size(void) {
8160
char buf[BUFSIZE_FGETS];
8162
unsigned long long free_size = 0;
8163
char *name_meminfo = "/proc/meminfo";
8166
if ((file_meminfo = fopen(name_meminfo, "r")) == NULL) {
8167
ERRMSG("Can't open the %s. %s\n", name_meminfo, strerror(errno));
8171
while (fgets(buf, BUFSIZE_FGETS, file_meminfo) != NULL) {
8172
if (sscanf(buf, "MemFree: %llu %s", &free_size, unit) == 2) {
8173
if (strcmp(unit, "kB") == 0) {
8180
ERRMSG("Can't get free memory size.\n");
8183
if (fclose(file_meminfo) < 0)
8184
ERRMSG("Can't close the %s. %s\n", name_meminfo, strerror(errno));
8191
* Choose the lesser value of the two below as the size of cyclic buffer.
8192
* - the size enough for storing the 1st/2nd bitmap for the whole of vmcore
8193
* - 80% of free memory (as safety limit)
8196
calculate_cyclic_buffer_size(void) {
8197
unsigned long long free_size, needed_size;
8199
if (info->max_mapnr <= 0) {
8200
ERRMSG("Invalid max_mapnr(%llu).\n", info->max_mapnr);
8205
* free_size will be used to allocate 1st and 2nd bitmap, so it
8206
* should be 40% of free memory to keep the size of cyclic buffer
8207
* within 80% of free memory.
8209
free_size = get_free_memory_size() * 0.4;
8210
needed_size = (info->max_mapnr * 2) / BITPERBYTE;
8212
info->bufsize_cyclic = (free_size <= needed_size) ? free_size : needed_size;
8217
static struct option longopts[] = {
8218
{"split", no_argument, NULL, 's'},
8219
{"reassemble", no_argument, NULL, 'r'},
8220
{"xen-syms", required_argument, NULL, 'y'},
8221
{"xen-vmcoreinfo", required_argument, NULL, 'z'},
8222
{"xen_phys_start", required_argument, NULL, 'P'},
8223
{"message-level", required_argument, NULL, 'm'},
8224
{"vtop", required_argument, NULL, 'V'},
8225
{"dump-dmesg", no_argument, NULL, 'M'},
8226
{"config", required_argument, NULL, 'C'},
8227
{"help", no_argument, NULL, 'h'},
8228
{"diskset", required_argument, NULL, 'k'},
8229
{"non-cyclic", no_argument, NULL, 'Y'},
8230
{"cyclic-buffer", required_argument, NULL, 'Z'},
8235
main(int argc, char *argv[])
8237
int i, opt, flag_debug = FALSE;
8239
if ((info = calloc(1, sizeof(struct DumpInfo))) == NULL) {
8240
ERRMSG("Can't allocate memory for the pagedesc cache. %s.\n",
8244
if ((info->dump_header = calloc(1, sizeof(struct disk_dump_header)))
8246
ERRMSG("Can't allocate memory for the dump header. %s\n",
8250
initialize_tables();
8253
* By default, makedumpfile works in constant memory space.
8255
info->flag_cyclic = TRUE;
8257
info->block_order = DEFAULT_ORDER;
8258
message_level = DEFAULT_MSG_LEVEL;
8259
while ((opt = getopt_long(argc, argv, "b:cDd:EFfg:hi:lMpRrsvXx:", longopts,
8263
info->block_order = atoi(optarg);
8266
info->name_filterconfig = optarg;
8269
info->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
8275
if (!parse_dump_level(optarg))
8279
info->flag_elf_dumpfile = 1;
8282
info->flag_flatten = 1;
8284
* All messages are output to STDERR because STDOUT is
8285
* used for outputting dump data.
8287
flag_strerr_message = TRUE;
8290
info->flag_force = 1;
8293
info->flag_generate_vmcoreinfo = 1;
8294
info->name_vmcoreinfo = optarg;
8297
info->flag_show_usage = 1;
8300
info->flag_read_vmcoreinfo = 1;
8301
info->name_vmcoreinfo = optarg;
8304
if (!sadump_add_diskset_info(optarg))
8306
info->flag_sadump_diskset = 1;
8309
info->flag_compress = DUMP_DH_COMPRESSED_LZO;
8312
message_level = atoi(optarg);
8315
info->flag_dmesg = 1;
8318
info->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
8321
info->xen_phys_start = strtoul(optarg, NULL, 0);
8324
info->flag_rearrange = 1;
8327
info->flag_split = 1;
8330
info->flag_reassemble = 1;
8333
info->vaddr_for_vtop = strtoul(optarg, NULL, 0);
8336
info->flag_show_version = 1;
8339
info->flag_exclude_xen_dom = 1;
8342
info->name_vmlinux = optarg;
8345
info->name_xen_syms = optarg;
8348
info->flag_cyclic = FALSE;
8351
info->flag_read_vmcoreinfo = 1;
8352
info->name_vmcoreinfo = optarg;
8355
info->bufsize_cyclic = atoi(optarg);
8358
MSG("Commandline parameter is invalid.\n");
8359
MSG("Try `makedumpfile --help' for more information.\n");
8364
message_level |= ML_PRINT_DEBUG_MSG;
8366
if (info->flag_show_usage) {
8370
if (info->flag_show_version) {
8375
if (elf_version(EV_CURRENT) == EV_NONE ) {
8377
* library out of date
8379
ERRMSG("Elf library out of date!\n");
8382
if (info->flag_generate_vmcoreinfo) {
8383
if (!check_param_for_generating_vmcoreinfo(argc, argv)) {
8384
MSG("Commandline parameter is invalid.\n");
8385
MSG("Try `makedumpfile --help' for more information.\n");
8388
if (!open_files_for_generating_vmcoreinfo())
8391
if (info->name_xen_syms) {
8392
if (!generate_vmcoreinfo_xen())
8395
if (!generate_vmcoreinfo())
8399
if (!close_files_for_generating_vmcoreinfo())
8403
MSG("The vmcoreinfo is saved to %s.\n", info->name_vmcoreinfo);
8405
} else if (info->flag_rearrange) {
8406
if (!check_param_for_rearranging_dumpdata(argc, argv)) {
8407
MSG("Commandline parameter is invalid.\n");
8408
MSG("Try `makedumpfile --help' for more information.\n");
8411
if (!open_files_for_rearranging_dumpdata())
8414
if (!rearrange_dumpdata())
8417
if (!close_files_for_rearranging_dumpdata())
8421
MSG("The dumpfile is saved to %s.\n", info->name_dumpfile);
8422
} else if (info->flag_reassemble) {
8423
if (!check_param_for_reassembling_dumpfile(argc, argv)) {
8424
MSG("Commandline parameter is invalid.\n");
8425
MSG("Try `makedumpfile --help' for more information.\n");
8428
if (!reassemble_dumpfile())
8432
MSG("The dumpfile is saved to %s.\n", info->name_dumpfile);
8433
} else if (info->flag_dmesg) {
8434
if (!check_param_for_creating_dumpfile(argc, argv)) {
8435
MSG("Commandline parameter is invalid.\n");
8436
MSG("Try `makedumpfile --help' for more information.\n");
8443
MSG("The dmesg log is saved to %s.\n", info->name_dumpfile);
8445
if (!check_param_for_creating_dumpfile(argc, argv)) {
8446
MSG("Commandline parameter is invalid.\n");
8447
MSG("Try `makedumpfile --help' for more information.\n");
8450
if (!create_dumpfile())
8454
if (info->flag_split) {
8455
MSG("The dumpfiles are saved to ");
8456
for (i = 0; i < info->num_dumpfile; i++) {
8457
if (i != (info->num_dumpfile - 1))
8458
MSG("%s, ", SPLITTING_DUMPFILE(i));
8460
MSG("and %s.\n", SPLITTING_DUMPFILE(i));
8463
MSG("The dumpfile is saved to %s.\n", info->name_dumpfile);
8469
if (retcd == COMPLETED)
8470
MSG("makedumpfile Completed.\n");
8472
MSG("makedumpfile Failed.\n");
8475
if (info->dh_memory)
8476
free(info->dh_memory);
8477
if (info->kh_memory)
8478
free(info->kh_memory);
8479
if (info->valid_pages)
8480
free(info->valid_pages);
8481
if (info->bitmap_memory)
8482
free(info->bitmap_memory);
8483
if (info->fd_memory)
8484
close(info->fd_memory);
8485
if (info->fd_dumpfile)
8486
close(info->fd_dumpfile);
8487
if (info->fd_bitmap)
8488
close(info->fd_bitmap);
8489
if (vt.node_online_map != NULL)
8490
free(vt.node_online_map);
8491
if (info->mem_map_data != NULL)
8492
free(info->mem_map_data);
8493
if (info->dump_header != NULL)
8494
free(info->dump_header);
8495
if (info->splitting_info != NULL)
8496
free(info->splitting_info);
8497
if (info->p2m_mfn_frame_list != NULL)
8498
free(info->p2m_mfn_frame_list);
8499
if (info->partial_bitmap1 != NULL)
8500
free(info->partial_bitmap1);
8501
if (info->partial_bitmap2 != NULL)
8502
free(info->partial_bitmap2);