From 696c6f26cc23f5eeb47f166f7978f56de132b2dc Mon Sep 17 00:00:00 2001 From: nanxiongchao Date: Feb 10 2020 07:48:56 +0000 Subject: Allow building in mips64 Signed-off-by: nanxiongchao --- diff --git a/.crash-trace-command.metadata b/.crash-trace-command.metadata new file mode 100644 index 0000000..60d2250 --- /dev/null +++ b/.crash-trace-command.metadata @@ -0,0 +1 @@ +b92c7a1f6b69e5a2e3142b68c76f46e0ebcf204e SOURCES/crash-trace-command-2.0.tar.gz diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6a4b556 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/crash-trace-command-2.0.tar.gz diff --git a/SOURCES/ARM32.patch b/SOURCES/ARM32.patch new file mode 100644 index 0000000..737aec6 --- /dev/null +++ b/SOURCES/ARM32.patch @@ -0,0 +1,13 @@ +--- crash-trace-command-2.0/Makefile.orig ++++ crash-trace-command-2.0/Makefile +@@ -30,6 +30,10 @@ ifeq ($(shell arch), aarch64) + TARGET=ARM64 + TARGET_CFLAGS= + endif ++ifeq ($(shell arch), armv7l) ++ TARGET=ARM32 ++ TARGET_CFLAGS= ++endif + + INCDIR=/usr/include/crash + diff --git a/SOURCES/ARM64.patch b/SOURCES/ARM64.patch new file mode 100644 index 0000000..ff7abd0 --- /dev/null +++ b/SOURCES/ARM64.patch @@ -0,0 +1,13 @@ +--- crash-trace-command-2.0/Makefile.orig ++++ crash-trace-command-2.0/Makefile +@@ -22,6 +22,10 @@ ifeq ($(shell arch), s390) + TARGET=S390 + TARGET_CFLAGS= + endif ++ifeq ($(shell arch), aarch64) ++ TARGET=ARM64 ++ TARGET_CFLAGS= ++endif + + INCDIR=/usr/include/crash + diff --git a/SOURCES/MIPS.patch b/SOURCES/MIPS.patch new file mode 100644 index 0000000..ef3055e --- /dev/null +++ b/SOURCES/MIPS.patch @@ -0,0 +1,13 @@ +--- crash-trace-command-2.0/Makefile.orig ++++ crash-trace-command-2.0/Makefile +@@ -34,6 +34,10 @@ ifeq ($(shell arch), armv7l) + TARGET=ARM32 + TARGET_CFLAGS= + endif ++ifeq ($(shell arch), mips64) ++ TARGET=MIPS ++ TARGET_CFLAGS= ++endif + + INCDIR=/usr/include/crash + diff --git a/SOURCES/TRACE_EVENT_FL_TRACEPOINT_flag.patch b/SOURCES/TRACE_EVENT_FL_TRACEPOINT_flag.patch new file mode 100644 index 0000000..530f1ed --- /dev/null +++ b/SOURCES/TRACE_EVENT_FL_TRACEPOINT_flag.patch @@ -0,0 +1,743 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -38,6 +38,10 @@ static int trace_buffer_available; + * max_buffer is supported + */ + static int max_buffer_available; ++/* ++ * multiple trace instances are supported ++ */ ++static int multiple_instances_available; + + #define koffset(struct, member) struct##_##member##_offset + +@@ -78,6 +82,8 @@ static int koffset(ftrace_event_field, o + static int koffset(ftrace_event_field, size); + static int koffset(ftrace_event_field, is_signed); + ++static int koffset(trace_array, name); ++ + static int koffset(POINTER_SYM, POINTER) = 0; + + struct ring_buffer_per_cpu { +@@ -101,16 +107,25 @@ struct ring_buffer_per_cpu { + }; + + static ulong global_trace; +-static ulong global_trace_buffer; +-static ulong global_max_buffer; +-static ulong global_ring_buffer; +-static unsigned global_pages; +-static struct ring_buffer_per_cpu *global_buffers; +- + static ulong max_tr_trace; +-static ulong max_tr_ring_buffer; +-static unsigned max_tr_pages; +-static struct ring_buffer_per_cpu *max_tr_buffers; ++ ++struct trace_instance { ++ char name[NAME_MAX + 1]; ++ ulong trace_buffer; ++ ulong max_buffer; ++ ulong ring_buffer; ++ unsigned pages; ++ struct ring_buffer_per_cpu *buffers; ++ ++ ulong max_tr_ring_buffer; ++ unsigned max_tr_pages; ++ struct ring_buffer_per_cpu *max_tr_buffers; ++}; ++ ++static ulong ftrace_trace_arrays; 
++static struct trace_instance global_trace_instance; ++static struct trace_instance *trace_instances = NULL; ++static int instance_count; + + static ulong ftrace_events; + static ulong current_trace; +@@ -229,6 +244,9 @@ static int init_offsets(void) + init_offset(ftrace_event_field, size); + init_offset(ftrace_event_field, is_signed); + ++ if (MEMBER_EXISTS("trace_array", "name")) ++ init_offset(trace_array, name); ++ + return 0; + #undef init_offset + } +@@ -435,61 +453,140 @@ out_fail: + return -1; + } + +-static int ftrace_int_global_trace(void) ++static int ftrace_init_trace(struct trace_instance *ti, ulong instance_addr) + { + if (trace_buffer_available) { +- global_trace_buffer = global_trace + koffset(trace_array, trace_buffer); +- read_value(global_ring_buffer, global_trace_buffer, trace_buffer, buffer); ++ ti->trace_buffer = instance_addr + ++ koffset(trace_array, trace_buffer); ++ read_value(ti->ring_buffer, ti->trace_buffer, ++ trace_buffer, buffer); ++ ++ if (max_buffer_available) { ++ ti->max_buffer = instance_addr + ++ koffset(trace_array, max_buffer); ++ read_value(ti->max_tr_ring_buffer, ti->max_buffer, ++ trace_buffer, buffer); ++ } + } else { +- read_value(global_ring_buffer, global_trace, trace_array, buffer); +- read_value(global_pages, global_ring_buffer, ring_buffer, pages); ++ read_value(ti->ring_buffer, instance_addr, trace_array, buffer); ++ read_value(ti->pages, ti->ring_buffer, ring_buffer, pages); ++ ++ read_value(ti->max_tr_ring_buffer, max_tr_trace, trace_array, buffer); ++ if (ti->max_tr_ring_buffer) ++ read_value(ti->max_tr_pages, ti->max_tr_ring_buffer, ring_buffer, pages); + } + +- global_buffers = calloc(sizeof(*global_buffers), nr_cpu_ids); +- if (global_buffers == NULL) ++ ti->buffers = calloc(sizeof(*ti->buffers), nr_cpu_ids); ++ if (ti->buffers == NULL) ++ goto out_fail; ++ ++ if (ftrace_init_buffers(ti->buffers, ti->ring_buffer, ++ ti->pages) < 0) ++ goto out_fail; ++ ++ if (!ti->max_tr_ring_buffer) ++ return 0; ++ ++ ti->max_tr_buffers = calloc(sizeof(*ti->max_tr_buffers), nr_cpu_ids); ++ if (ti->max_tr_buffers == NULL) + goto out_fail; + +- if (ftrace_init_buffers(global_buffers, global_ring_buffer, +- global_pages) < 0) ++ if (ftrace_init_buffers(ti->max_tr_buffers, ti->max_tr_ring_buffer, ++ ti->max_tr_pages) < 0) + goto out_fail; + + return 0; + + out_fail: +- free(global_buffers); ++ free(ti->max_tr_buffers); ++ free(ti->buffers); + return -1; + } + +-static int ftrace_int_max_tr_trace(void) ++static void ftrace_destroy_all_instance_buffers() + { +- if (trace_buffer_available) { +- if (!max_buffer_available) +- return 0; ++ int i; + +- global_max_buffer = global_trace + koffset(trace_array, max_buffer); +- read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer); +- } else { +- read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer); ++ for (i = 0; i < instance_count; i++) ++ { ++ struct trace_instance *ti = &trace_instances[i]; + +- if (!max_tr_ring_buffer) +- return 0; ++ if (ti->max_tr_ring_buffer) { ++ ftrace_destroy_buffers(ti->max_tr_buffers); ++ free(ti->max_tr_buffers); ++ } + +- read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages); ++ ftrace_destroy_buffers(ti->buffers); ++ free(ti->buffers); + } ++} + +- max_tr_buffers = calloc(sizeof(*max_tr_buffers), nr_cpu_ids); +- if (max_tr_buffers == NULL) +- goto out_fail; ++static void ftrace_destroy_instances() ++{ ++ ftrace_destroy_all_instance_buffers(); ++ free(trace_instances); ++} + +- if (ftrace_init_buffers(max_tr_buffers, max_tr_ring_buffer, +- 
max_tr_pages) < 0) +- goto out_fail; ++static int ftrace_init_instances() ++{ ++ int i; ++ struct trace_instance *ti; ++ struct list_data list_data; ++ struct list_data *ld = &list_data; ++ ++ if (!multiple_instances_available) ++ return 0; ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ld->start = ftrace_trace_arrays; ++ ld->end = global_trace; ++ ld->flags = LIST_ALLOCATE; ++ instance_count = do_list(ld); ++ ++ /* The do_list count includes the list_head, which is not a ++ * proper instance */ ++ instance_count--; ++ if (instance_count <= 0) ++ return 0; ++ ++ trace_instances = calloc(sizeof(struct trace_instance), instance_count); ++ ++ /* We start i at 1 to skip over the list_head and continue to the last ++ * instance, which lies at index instance_count */ ++ for (i = 1; i <= instance_count; i++) ++ { ++ ulong instance_ptr; ++ ulong name_addr; ++ int ret; ++ ++ ti = &trace_instances[i-1]; ++ instance_ptr = ld->list_ptr[i]; ++ read_value(name_addr, instance_ptr, trace_array, name); ++ if (!name_addr) ++ { ++ console("Instance name is NULL\n"); ++ } ++ else if (!read_string(name_addr, ti->name, sizeof(ti->name))) ++ { ++ console("Failed to read instance name at address %p\n", (void*)name_addr); ++ goto out_fail; ++ } ++ ++ ret = ftrace_init_trace(ti, instance_ptr); ++ if (ret < 0) ++ goto out_fail; ++ } ++ FREEBUF(ld->list_ptr); + + return 0; + + out_fail: +- free(max_tr_buffers); +- max_tr_ring_buffer = 0; ++ /* We've already freed the current instance's trace buffer info, so ++ * we'll clear that out to avoid double freeing in ++ * ftrace_destroy_instances() */ ++ BZERO(ti, sizeof(struct trace_instance)); ++ ftrace_destroy_instances(); ++ + return -1; + } + +@@ -504,7 +601,7 @@ static int ftrace_init_current_tracer(vo + } else { + read_value(addr, current_trace, POINTER_SYM, POINTER); + } +- ++ + read_value(addr, addr, tracer, name); + read_string(addr, tmp, 128); + +@@ -524,9 +621,11 @@ static int ftrace_init(void) + struct syment *sym_max_tr_trace; + struct syment *sym_ftrace_events; + struct syment *sym_current_trace; ++ struct syment *sym_ftrace_trace_arrays; + + sym_global_trace = symbol_search("global_trace"); + sym_ftrace_events = symbol_search("ftrace_events"); ++ sym_ftrace_trace_arrays = symbol_search("ftrace_trace_arrays"); + + if (sym_global_trace == NULL || sym_ftrace_events == NULL) + return -1; +@@ -534,6 +633,13 @@ static int ftrace_init(void) + global_trace = sym_global_trace->value; + ftrace_events = sym_ftrace_events->value; + ++ if (sym_ftrace_trace_arrays) ++ { ++ multiple_instances_available = 1; ++ ftrace_trace_arrays = sym_ftrace_trace_arrays->value; ++ } ++ ++ + if (MEMBER_EXISTS("trace_array", "current_trace")) { + encapsulated_current_trace = 1; + } else { +@@ -564,28 +670,31 @@ static int ftrace_init(void) + return -1; + print_offsets(); + +- if (ftrace_int_global_trace() < 0) ++ if (ftrace_init_trace(&global_trace_instance, global_trace) < 0) + goto out_0; + +- ftrace_int_max_tr_trace(); ++ if (ftrace_init_instances() < 0) ++ goto out_1; + + if (ftrace_init_event_types() < 0) +- goto out_1; ++ goto out_2; + + if (ftrace_init_current_tracer() < 0) +- goto out_2; ++ goto out_3; + + return 0; + +-out_2: ++out_3: + ftrace_destroy_event_types(); ++out_2: ++ ftrace_destroy_instances(); + out_1: +- if (max_tr_ring_buffer) { +- ftrace_destroy_buffers(max_tr_buffers); +- free(max_tr_buffers); ++ if (global_trace_instance.max_tr_ring_buffer) { ++ ftrace_destroy_buffers(global_trace_instance.max_tr_buffers); ++ free(global_trace_instance.max_tr_buffers); + } +- 
ftrace_destroy_buffers(global_buffers); +- free(global_buffers); ++ ftrace_destroy_buffers(global_trace_instance.buffers); ++ free(global_trace_instance.buffers); + out_0: + return -1; + } +@@ -595,13 +704,15 @@ static void ftrace_destroy(void) + free(current_tracer_name); + ftrace_destroy_event_types(); + +- if (max_tr_ring_buffer) { +- ftrace_destroy_buffers(max_tr_buffers); +- free(max_tr_buffers); ++ ftrace_destroy_instances(); ++ ++ if (global_trace_instance.max_tr_ring_buffer) { ++ ftrace_destroy_buffers(global_trace_instance.max_tr_buffers); ++ free(global_trace_instance.max_tr_buffers); + } + +- ftrace_destroy_buffers(global_buffers); +- free(global_buffers); ++ ftrace_destroy_buffers(global_trace_instance.buffers); ++ free(global_trace_instance.buffers); + } + + static int ftrace_dump_page(int fd, ulong page, void *page_tmp) +@@ -652,7 +763,8 @@ static int try_mkdir(const char *pathnam + return 0; + } + +-static int ftrace_dump_buffers(const char *per_cpu_path) ++static int ftrace_dump_buffers(const char *per_cpu_path, ++ struct trace_instance *ti) + { + int i; + void *page_tmp; +@@ -664,7 +776,7 @@ static int ftrace_dump_buffers(const cha + return -1; + + for (i = 0; i < nr_cpu_ids; i++) { +- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; ++ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i]; + + if (!cpu_buffer->kaddr) + continue; +@@ -679,7 +791,7 @@ static int ftrace_dump_buffers(const cha + if (fd < 0) + goto out_fail; + +- ftrace_dump_buffer(fd, cpu_buffer, global_pages, page_tmp); ++ ftrace_dump_buffer(fd, cpu_buffer, ti->pages, page_tmp); + close(fd); + } + +@@ -1015,8 +1127,6 @@ static void ftrace_destroy_event_types(v + free(ftrace_common_fields); + } + +-#define TRACE_EVENT_FL_TRACEPOINT 0x40 +- + static + int ftrace_get_event_type_name(ulong call, char *name, int len) + { +@@ -1024,34 +1134,35 @@ int ftrace_get_event_type_name(ulong cal + static int name_offset; + static int flags_offset; + static int tp_name_offset; +- uint flags; ++ static long tracepoint_flag; + ++ uint flags; + ulong name_addr; + + if (inited) + goto work; + +- inited = 1; +- name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"), +- MEMBER_OFFSET("trace_event_call", "name")); +- if (name_offset >= 0) +- goto work; +- +- name_offset = MAX(ANON_MEMBER_OFFSET("ftrace_event_call", "name"), +- ANON_MEMBER_OFFSET("trace_event_call", "name")); +- if (name_offset < 0) +- return -1; ++ name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "tp"), ++ MEMBER_OFFSET("trace_event_call", "tp")); ++ if (name_offset >= 0) { ++ flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"), ++ MEMBER_OFFSET("trace_event_call", "flags")); ++ if (flags_offset < 0) ++ return -1; + +- flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"), +- MEMBER_OFFSET("trace_event_call", "flags")); +- if (flags_offset < 0) +- return -1; ++ tp_name_offset = MEMBER_OFFSET("tracepoint", "name"); ++ if (tp_name_offset < 0) ++ return -1; + +- tp_name_offset = MEMBER_OFFSET("tracepoint", "name"); +- if (tp_name_offset < 0) +- return -1; ++ if (!enumerator_value("TRACE_EVENT_FL_TRACEPOINT", &tracepoint_flag)) ++ return -1; + +- inited = 2; ++ inited = 2; ++ } else { ++ name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"), ++ MEMBER_OFFSET("trace_event_call", "name")); ++ inited = 1; ++ } + + work: + if (name_offset < 0) +@@ -1067,7 +1178,7 @@ work: + RETURN_ON_ERROR)) + return -1; + +- if (flags & TRACE_EVENT_FL_TRACEPOINT) { ++ if (flags & (uint)tracepoint_flag) { + if (!readmem(name_addr + 
tp_name_offset, KVADDR, + &name_addr, sizeof(name_addr), + "read tracepoint name", RETURN_ON_ERROR)) +@@ -1476,26 +1587,72 @@ static int dump_kallsyms(const char *dum + + static int trace_cmd_data_output(int fd); + ++#define FTRACE_DUMP_SYMBOLS (1 << 0) ++#define FTRACE_DUMP_META_DATA (1 << 1) ++ ++static int populate_ftrace_dir_tree(struct trace_instance *ti, ++ char *root, uint flags) ++{ ++ char path[PATH_MAX]; ++ int ret; ++ ++ ret = mkdir(root, 0755); ++ if (ret < 0) { ++ if (errno == EEXIST) ++ error(INFO, "mkdir: %s exists\n", root); ++ return FALSE; ++ } ++ ++ snprintf(path, sizeof(path), "%s/per_cpu", root); ++ if (try_mkdir(path, 0755) < 0) ++ return FALSE; ++ ++ if (ftrace_dump_buffers(path, ti) < 0) ++ return FALSE; ++ ++ if (flags & FTRACE_DUMP_META_DATA) { ++ /* Dump event types */ ++ snprintf(path, sizeof(path), "%s/events", root); ++ if (try_mkdir(path, 0755) < 0) ++ return FALSE; ++ ++ if (ftrace_dump_event_types(path) < 0) ++ return FALSE; ++ ++ /* Dump pids with corresponding cmdlines */ ++ if (dump_saved_cmdlines(root) < 0) ++ return FALSE; ++ } ++ ++ if (flags & FTRACE_DUMP_SYMBOLS) { ++ /* Dump all symbols of the kernel */ ++ dump_kallsyms(root); ++ } ++ ++ return TRUE; ++} ++ + static void ftrace_dump(int argc, char *argv[]) + { + int c; +- int dump_meta_data = 0; +- int dump_symbols = 0; ++ int i; ++ uint flags = 0; + char *dump_tracing_dir; +- char path[PATH_MAX]; +- int ret; ++ char instance_path[PATH_MAX]; + + while ((c = getopt(argc, argv, "smt")) != EOF) { + switch(c) + { + case 's': +- dump_symbols = 1; ++ flags |= FTRACE_DUMP_SYMBOLS; + break; + case 'm': +- dump_meta_data = 1; ++ flags |= FTRACE_DUMP_META_DATA; + break; + case 't': +- if (dump_symbols || dump_meta_data || argc - optind > 1) ++ if (flags & FTRACE_DUMP_SYMBOLS || ++ flags & FTRACE_DUMP_META_DATA || ++ argc - optind > 1) + cmd_usage(pc->curcmd, SYNOPSIS); + else { + char *trace_dat = "trace.dat"; +@@ -1526,38 +1683,34 @@ static void ftrace_dump(int argc, char * + return; + } + +- ret = mkdir(dump_tracing_dir, 0755); +- if (ret < 0) { +- if (errno == EEXIST) +- error(INFO, "mkdir: %s exists\n", dump_tracing_dir); ++ if (!populate_ftrace_dir_tree(&global_trace_instance, dump_tracing_dir, flags)) + return; +- } + +- snprintf(path, sizeof(path), "%s/per_cpu", dump_tracing_dir); +- if (try_mkdir(path, 0755) < 0) ++ if (!multiple_instances_available || instance_count == 0) + return; + +- if (ftrace_dump_buffers(path) < 0) ++ /* Create an instances directory, and dump instance data in there */ ++ snprintf(instance_path, sizeof(instance_path), ++ "%s/instances", dump_tracing_dir); ++ if (try_mkdir(instance_path, 0755) < 0) + return; + +- if (dump_meta_data) { +- /* Dump event types */ +- snprintf(path, sizeof(path), "%s/events", dump_tracing_dir); +- if (try_mkdir(path, 0755) < 0) +- return; ++ /* Don't care about the flags anymore */ ++ flags = 0; + +- if (ftrace_dump_event_types(path) < 0) +- return; ++ for (i = 0; i < instance_count; i++) ++ { ++ struct trace_instance *ti = &trace_instances[i]; ++ ++ snprintf(instance_path, sizeof(instance_path), ++ "%s/instances/%s", dump_tracing_dir, ++ ti->name); + +- /* Dump pids with corresponding cmdlines */ +- if (dump_saved_cmdlines(dump_tracing_dir) < 0) +- return; ++ if (populate_ftrace_dir_tree(ti, instance_path, flags) < 0) ++ break; + } + +- if (dump_symbols) { +- /* Dump all symbols of the kernel */ +- dump_kallsyms(dump_tracing_dir); +- } ++ return; + } + + static void ftrace_show(int argc, char *argv[]) +@@ -2161,26 +2314,69 @@ static int 
save_ftrace_cmdlines(int fd) + return tmp_file_flush(fd); + } + +-static int save_res_data(int fd, int nr_cpu_buffers) ++/* From trace-cmd.h */ ++enum { ++ TRACECMD_OPTION_DONE, /* 0 */ ++ TRACECMD_OPTION_DATE, /* 1 */ ++ TRACECMD_OPTION_CPUSTAT, /* 2 */ ++ TRACECMD_OPTION_BUFFER, /* 3 */ ++ TRACECMD_OPTION_TRACECLOCK, /* 4 */ ++ TRACECMD_OPTION_UNAME, /* 5 */ ++ TRACECMD_OPTION_HOOK, /* 6 */ ++}; ++ ++static int write_options(int fd, unsigned long long *buffer_offsets) + { +- unsigned short option = 0; ++ int i; ++ unsigned short option; + +- if (write_and_check(fd, &nr_cpu_buffers, 4)) +- return -1; ++ if (!multiple_instances_available) ++ return 0; + + if (write_and_check(fd, "options ", 10)) + return -1; + ++ option = TRACECMD_OPTION_BUFFER; ++ for (i = 0; i < instance_count; i++) ++ { ++ char *name = trace_instances[i].name; ++ size_t name_size = strlen(name) + 1; /* Name length + '\0' */ ++ unsigned long long option_size = 8 + name_size; ++ unsigned long long offset; ++ ++ offset = buffer_offsets ? buffer_offsets[i] : 0; ++ if (write_and_check(fd, &option, 2)) ++ return -1; ++ if (write_and_check(fd, &option_size, 4)) ++ return -1; ++ if (write_and_check(fd, &offset, 8)) ++ return -1; ++ if (write_and_check(fd, name, name_size)) ++ return -1; ++ } ++ ++ option = TRACECMD_OPTION_DONE; + if (write_and_check(fd, &option, 2)) + return -1; + ++ return 0; ++} ++ ++static int save_res_data(int fd, int nr_cpu_buffers, unsigned long long *buffer_offsets) ++{ ++ if (write_and_check(fd, &nr_cpu_buffers, 4)) ++ return -1; ++ ++ if (write_options(fd, buffer_offsets)) ++ return -1; ++ + if (write_and_check(fd, "flyrecord", 10)) + return -1; + + return 0; + } + +-static int save_record_data(int fd, int nr_cpu_buffers) ++static int save_record_data(int fd, int nr_cpu_buffers, struct trace_instance *ti) + { + int i, j; + unsigned long long offset, buffer_offset; +@@ -2192,7 +2388,7 @@ static int save_record_data(int fd, int + buffer_offset = offset; + + for (i = 0; i < nr_cpu_ids; i++) { +- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; ++ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i]; + unsigned long long buffer_size; + + if (!cpu_buffer->kaddr) +@@ -2212,7 +2408,7 @@ static int save_record_data(int fd, int + + lseek(fd, offset, SEEK_SET); + for (i = 0; i < nr_cpu_ids; i++) { +- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; ++ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i]; + + if (!cpu_buffer->kaddr) + continue; +@@ -2231,13 +2427,13 @@ static int save_record_data(int fd, int + return 0; + } + +-static int __trace_cmd_data_output(int fd) ++static int get_nr_cpu_buffers(struct trace_instance *ti) + { + int i; + int nr_cpu_buffers = 0; + + for (i = 0; i < nr_cpu_ids; i++) { +- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; ++ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i]; + + if (!cpu_buffer->kaddr) + continue; +@@ -2245,6 +2441,19 @@ static int __trace_cmd_data_output(int f + nr_cpu_buffers++; + } + ++ return nr_cpu_buffers; ++} ++ ++static int __trace_cmd_data_output(int fd) ++{ ++ int nr_cpu_buffers; ++ unsigned long long global_res_data_offset; ++ unsigned long long *instance_offsets; ++ ++ instance_offsets = calloc(sizeof(unsigned long long), instance_count); ++ ++ nr_cpu_buffers = get_nr_cpu_buffers(&global_trace_instance); ++ + if (save_initial_data(fd)) + return -1; + if (save_header_files(fd)) +@@ -2257,9 +2466,38 @@ static int __trace_cmd_data_output(int f + return -1; + if (save_ftrace_cmdlines(fd)) + return -1; +- 
if (save_res_data(fd, nr_cpu_buffers)) ++ ++ /* We don't have the instance buffer offsets yet, so we'll write in 0s ++ * for now, and fix it up after we have that information available */ ++ global_res_data_offset = lseek(fd, 0, SEEK_CUR); ++ if (save_res_data(fd, nr_cpu_buffers, NULL)) + return -1; +- if (save_record_data(fd, nr_cpu_buffers)) ++ if (save_record_data(fd, nr_cpu_buffers, &global_trace_instance)) ++ return -1; ++ ++ if (multiple_instances_available) ++ { ++ int i; ++ ++ for (i = 0; i < instance_count; i++) ++ { ++ struct trace_instance *ti = &trace_instances[i]; ++ nr_cpu_buffers = get_nr_cpu_buffers(ti); ++ ++ /* Save off the instance offset for fixup later */ ++ instance_offsets[i] = lseek(fd, 0, SEEK_CUR); ++ ++ if (write_and_check(fd, "flyrecord", 10)) ++ return -1; ++ if (save_record_data(fd, nr_cpu_buffers, ti)) ++ return -1; ++ } ++ } ++ ++ /* Fix up the global trace's options header with the instance offsets */ ++ lseek(fd, global_res_data_offset, SEEK_SET); ++ nr_cpu_buffers = get_nr_cpu_buffers(&global_trace_instance); ++ if (save_res_data(fd, nr_cpu_buffers, instance_offsets)) + return -1; + + return 0; diff --git a/SOURCES/big_endian_nr_pages.patch b/SOURCES/big_endian_nr_pages.patch new file mode 100644 index 0000000..3498fad --- /dev/null +++ b/SOURCES/big_endian_nr_pages.patch @@ -0,0 +1,26 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -96,7 +96,7 @@ struct ring_buffer_per_cpu { + ulong real_head_page; + + int head_page_index; +- unsigned int nr_pages; ++ unsigned long nr_pages; + ulong *pages; + + ulong *linear_pages; +@@ -430,7 +432,13 @@ static int ftrace_init_buffers(struct ri + buffer_read_value(overrun); + buffer_read_value(entries); + if (per_cpu_buffer_sizes) { +- buffer_read_value(nr_pages); ++ if (MEMBER_SIZE("ring_buffer_per_cpu", "nr_pages") == sizeof(unsigned int)) { ++ unsigned int tmp_nr_pages; ++ read_value(tmp_nr_pages, buffers[i].kaddr, ring_buffer_per_cpu, nr_pages); ++ buffers[i].nr_pages = (unsigned long) tmp_nr_pages; ++ } else { ++ buffer_read_value(nr_pages); ++ } + pages = buffers[i].nr_pages; + } else + buffers[i].nr_pages = pages; diff --git a/SOURCES/initialize_trace_dat.patch b/SOURCES/initialize_trace_dat.patch new file mode 100644 index 0000000..6b1a25b --- /dev/null +++ b/SOURCES/initialize_trace_dat.patch @@ -0,0 +1,11 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -1373,7 +1373,7 @@ static void ftrace_dump(int argc, char * + if (dump_symbols || dump_meta_data || argc - optind > 1) + cmd_usage(pc->curcmd, SYNOPSIS); + else { +- char *trace_dat; ++ char *trace_dat = "trace.dat"; + int fd; + + if (argc - optind == 0) diff --git a/SOURCES/linux_3.10_support.patch b/SOURCES/linux_3.10_support.patch new file mode 100644 index 0000000..d520492 --- /dev/null +++ b/SOURCES/linux_3.10_support.patch @@ -0,0 +1,152 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -26,9 +26,21 @@ static int nr_cpu_ids; + */ + static int lockless_ring_buffer; + static int per_cpu_buffer_sizes; ++/* ++ * global and encapsulated current_trace are both supported ++ */ ++static int encapsulated_current_trace; ++/* ++ * trace_buffer is supported ++ */ ++static int trace_buffer_available; + + #define koffset(struct, member) struct##_##member##_offset + ++static int koffset(trace_array, current_trace); ++static int koffset(trace_array, trace_buffer); ++static int koffset(trace_array, max_buffer); ++static int koffset(trace_buffer, buffer); + static 
int koffset(trace_array, buffer); + static int koffset(tracer, name); + +@@ -85,6 +97,8 @@ struct ring_buffer_per_cpu { + }; + + static ulong global_trace; ++static ulong global_trace_buffer; ++static ulong global_max_buffer; + static ulong global_ring_buffer; + static unsigned global_pages; + static struct ring_buffer_per_cpu *global_buffers; +@@ -144,8 +158,16 @@ static int init_offsets(void) + } \ + } while (0) + ++ if (encapsulated_current_trace) ++ init_offset(trace_array, current_trace); + +- init_offset(trace_array, buffer); ++ if (trace_buffer_available) { ++ init_offset(trace_array, trace_buffer); ++ init_offset(trace_array, max_buffer); ++ init_offset(trace_buffer, buffer); ++ } else { ++ init_offset(trace_array, buffer); ++ } + init_offset(tracer, name); + + if (MEMBER_EXISTS("ring_buffer_per_cpu", "nr_pages")) { +@@ -400,8 +422,13 @@ out_fail: + + static int ftrace_int_global_trace(void) + { +- read_value(global_ring_buffer, global_trace, trace_array, buffer); +- read_value(global_pages, global_ring_buffer, ring_buffer, pages); ++ if (trace_buffer_available) { ++ global_trace_buffer = global_trace + koffset(trace_array, trace_buffer); ++ read_value(global_ring_buffer, global_trace_buffer, trace_buffer, buffer); ++ } else { ++ read_value(global_ring_buffer, global_trace, trace_array, buffer); ++ read_value(global_pages, global_ring_buffer, ring_buffer, pages); ++ } + + global_buffers = calloc(sizeof(*global_buffers), nr_cpu_ids); + if (global_buffers == NULL) +@@ -420,12 +447,17 @@ out_fail: + + static int ftrace_int_max_tr_trace(void) + { +- read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer); ++ if (trace_buffer_available) { ++ global_max_buffer = global_trace + koffset(trace_array, max_buffer); ++ read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer); ++ } else { ++ read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer); + +- if (!max_tr_ring_buffer) +- return 0; ++ if (!max_tr_ring_buffer) ++ return 0; + +- read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages); ++ read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages); ++ } + + max_tr_buffers = calloc(sizeof(*max_tr_buffers), nr_cpu_ids); + if (max_tr_buffers == NULL) +@@ -449,7 +481,12 @@ static int ftrace_init_current_tracer(vo + char tmp[128]; + + /* Get current tracer name */ +- read_value(addr, current_trace, POINTER_SYM, POINTER); ++ if (encapsulated_current_trace) { ++ read_value(addr, global_trace, trace_array, current_trace); ++ } else { ++ read_value(addr, current_trace, POINTER_SYM, POINTER); ++ } ++ + read_value(addr, addr, tracer, name); + read_string(addr, tmp, 128); + +@@ -471,19 +508,33 @@ static int ftrace_init(void) + struct syment *sym_current_trace; + + sym_global_trace = symbol_search("global_trace"); +- sym_max_tr_trace = symbol_search("max_tr"); + sym_ftrace_events = symbol_search("ftrace_events"); +- sym_current_trace = symbol_search("current_trace"); + +- if (sym_global_trace == NULL || sym_max_tr_trace == NULL +- || sym_ftrace_events == NULL +- || sym_current_trace == NULL) ++ if (sym_global_trace == NULL || sym_ftrace_events == NULL) + return -1; + + global_trace = sym_global_trace->value; +- max_tr_trace = sym_max_tr_trace->value; + ftrace_events = sym_ftrace_events->value; +- current_trace = sym_current_trace->value; ++ ++ if (MEMBER_EXISTS("trace_array", "current_trace")) { ++ encapsulated_current_trace = 1; ++ } else { ++ sym_current_trace = symbol_search("current_trace"); ++ if (sym_current_trace == NULL) ++ return -1; ++ ++ 
current_trace = sym_current_trace->value; ++ } ++ ++ if (MEMBER_EXISTS("trace_array", "trace_buffer")) { ++ trace_buffer_available = 1; ++ } else { ++ sym_max_tr_trace = symbol_search("max_tr"); ++ if (sym_max_tr_trace == NULL) ++ return -1; ++ ++ max_tr_trace = sym_max_tr_trace->value; ++ } + + if (!try_get_symbol_data("nr_cpu_ids", sizeof(int), &nr_cpu_ids)) + nr_cpu_ids = 1; +@@ -1453,6 +1504,7 @@ static void ftrace_show(int argc, char * + if ((file = popen(trace_cmd, "r"))) { + ret = fread(buf, 1, sizeof(buf), file); + buf[ret] = 0; ++ pclose(file); + } + if (!strstr(buf, "trace-cmd version")) { + if (env_trace_cmd) diff --git a/SOURCES/linux_4.2_support.patch b/SOURCES/linux_4.2_support.patch new file mode 100644 index 0000000..487b84e --- /dev/null +++ b/SOURCES/linux_4.2_support.patch @@ -0,0 +1,265 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -34,6 +34,10 @@ static int encapsulated_current_trace; + * trace_buffer is supported + */ + static int trace_buffer_available; ++/* ++ * max_buffer is supported ++ */ ++static int max_buffer_available; + + #define koffset(struct, member) struct##_##member##_offset + +@@ -154,6 +158,7 @@ static int init_offsets(void) + if (koffset(struct, member) < 0) { \ + fprintf(fp, "failed to init the offset, struct:"\ + #struct ", member:" #member); \ ++ fprintf(fp, "\n"); \ + return -1; \ + } \ + } while (0) +@@ -163,8 +168,10 @@ static int init_offsets(void) + + if (trace_buffer_available) { + init_offset(trace_array, trace_buffer); +- init_offset(trace_array, max_buffer); + init_offset(trace_buffer, buffer); ++ ++ if (max_buffer_available) ++ init_offset(trace_array, max_buffer); + } else { + init_offset(trace_array, buffer); + } +@@ -176,6 +183,9 @@ static int init_offsets(void) + fprintf(fp, "per cpu buffer sizes\n"); + } + ++ if (kernel_symbol_exists("ring_buffer_read")) ++ gdb_set_crash_scope(symbol_value("ring_buffer_read"), "ring_buffer_read"); ++ + if (!per_cpu_buffer_sizes) + init_offset(ring_buffer, pages); + init_offset(ring_buffer, flags); +@@ -205,7 +215,12 @@ static int init_offsets(void) + + init_offset(list_head, next); + +- init_offset(ftrace_event_call, list); ++ koffset(ftrace_event_call, list) = MAX(MEMBER_OFFSET("ftrace_event_call", "list"), ++ MEMBER_OFFSET("trace_event_call", "list")); ++ if (koffset(ftrace_event_call, list) < 0) { ++ fprintf(fp, "failed to init the offset, struct:[f]trace_event_call member:list)\n"); ++ return -1; \ ++ } + + init_offset(ftrace_event_field, link); + init_offset(ftrace_event_field, name); +@@ -448,6 +463,9 @@ out_fail: + static int ftrace_int_max_tr_trace(void) + { + if (trace_buffer_available) { ++ if (!max_buffer_available) ++ return 0; ++ + global_max_buffer = global_trace + koffset(trace_array, max_buffer); + read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer); + } else { +@@ -528,6 +546,9 @@ static int ftrace_init(void) + + if (MEMBER_EXISTS("trace_array", "trace_buffer")) { + trace_buffer_available = 1; ++ ++ if (MEMBER_EXISTS("trace_array", "max_buffer")) ++ max_buffer_available = 1; + } else { + sym_max_tr_trace = symbol_search("max_tr"); + if (sym_max_tr_trace == NULL) +@@ -710,7 +731,8 @@ static int syscall_get_enter_fields(ulon + goto work; + + inited = 1; +- data_offset = MEMBER_OFFSET("ftrace_event_call", "data"); ++ data_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "data"), ++ MEMBER_OFFSET("trace_event_call", "data")); + if (data_offset < 0) + return -1; + +@@ -742,7 +764,8 @@ static int syscall_get_exit_fields_old(u + 
goto work; + + inited = 1; +- data_offset = MEMBER_OFFSET("ftrace_event_call", "data"); ++ data_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "data"), ++ MEMBER_OFFSET("trace_event_call", "data")); + if (data_offset < 0) + return -1; + +@@ -803,18 +826,22 @@ int ftrace_get_event_type_fields(ulong c + goto work; + + inited = 1; +- fields_offset = MEMBER_OFFSET("ftrace_event_call", "fields"); ++ fields_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "fields"), ++ MEMBER_OFFSET("trace_event_call", "fields")); + +- class_offset = MEMBER_OFFSET("ftrace_event_call", "class"); ++ class_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "class"), ++ MEMBER_OFFSET("trace_event_call", "class")); + if (class_offset < 0) + goto work; + + inited = 2; +- fields_offset = MEMBER_OFFSET("ftrace_event_class", "fields"); ++ fields_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "fields"), ++ MEMBER_OFFSET("trace_event_class", "fields")); + if (fields_offset < 0) + return -1; + +- get_fields_offset = MEMBER_OFFSET("ftrace_event_class", "get_fields"); ++ get_fields_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "get_fields"), ++ MEMBER_OFFSET("trace_event_class", "get_fields")); + if ((sp = symbol_search("syscall_get_enter_fields")) != NULL) + syscall_get_enter_fields_value = sp->value; + if ((sp = symbol_search("syscall_get_exit_fields")) != NULL) +@@ -988,19 +1015,45 @@ static void ftrace_destroy_event_types(v + free(ftrace_common_fields); + } + ++#define TRACE_EVENT_FL_TRACEPOINT 0x40 ++ + static + int ftrace_get_event_type_name(ulong call, char *name, int len) + { + static int inited; + static int name_offset; ++ static int flags_offset; ++ static int tp_name_offset; ++ uint flags; + + ulong name_addr; + +- if (!inited) { +- inited = 1; +- name_offset = MEMBER_OFFSET("ftrace_event_call", "name"); +- } ++ if (inited) ++ goto work; + ++ inited = 1; ++ name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"), ++ MEMBER_OFFSET("trace_event_call", "name")); ++ if (name_offset >= 0) ++ goto work; ++ ++ name_offset = MAX(ANON_MEMBER_OFFSET("ftrace_event_call", "name"), ++ ANON_MEMBER_OFFSET("trace_event_call", "name")); ++ if (name_offset < 0) ++ return -1; ++ ++ flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"), ++ MEMBER_OFFSET("trace_event_call", "flags")); ++ if (flags_offset < 0) ++ return -1; ++ ++ tp_name_offset = MEMBER_OFFSET("tracepoint", "name"); ++ if (tp_name_offset < 0) ++ return -1; ++ ++ inited = 2; ++ ++work: + if (name_offset < 0) + return -1; + +@@ -1008,6 +1061,21 @@ int ftrace_get_event_type_name(ulong cal + "read ftrace_event_call name_addr", RETURN_ON_ERROR)) + return -1; + ++ if (inited == 2) { ++ if (!readmem(call + flags_offset, KVADDR, &flags, ++ sizeof(flags), "read ftrace_event_call flags", ++ RETURN_ON_ERROR)) ++ return -1; ++ ++ if (flags & TRACE_EVENT_FL_TRACEPOINT) { ++ if (!readmem(name_addr + tp_name_offset, KVADDR, ++ &name_addr, sizeof(name_addr), ++ "read tracepoint name", RETURN_ON_ERROR)) ++ return -1; ++ } ++ ++ } ++ + if (!read_string(name_addr, name, len)) + return -1; + +@@ -1028,16 +1096,19 @@ int ftrace_get_event_type_system(ulong c + goto work; + + inited = 1; +- sys_offset = MEMBER_OFFSET("ftrace_event_call", "system"); ++ sys_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "system"), ++ MEMBER_OFFSET("trace_event_call", "system")); + + if (sys_offset >= 0) + goto work; + +- class_offset = MEMBER_OFFSET("ftrace_event_call", "class"); ++ class_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "class"), ++ MEMBER_OFFSET("trace_event_call", "class")); 
+ if (class_offset < 0) + return -1; + +- sys_offset = MEMBER_OFFSET("ftrace_event_class", "system"); ++ sys_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "system"), ++ MEMBER_OFFSET("trace_event_class", "system")); + inited = 2; + + work: +@@ -1109,7 +1180,8 @@ int ftrace_get_event_type_print_fmt(ulon + + if (!inited) { + inited = 1; +- fmt_offset = MEMBER_OFFSET("ftrace_event_call", "print_fmt"); ++ fmt_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "print_fmt"), ++ MEMBER_OFFSET("trace_event_call", "print_fmt")); + } + + if (fmt_offset < 0) { +@@ -1132,11 +1204,13 @@ int ftrace_get_event_type_id(ulong call, + + if (!inited) { + inited = 1; +- id_offset = MEMBER_OFFSET("ftrace_event_call", "id"); ++ id_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "id"), ++ MEMBER_OFFSET("trace_event_call", "id")); + + if (id_offset < 0) { + /* id = call->event.type */ +- int f1 = MEMBER_OFFSET("ftrace_event_call", "event"); ++ int f1 = MAX(MEMBER_OFFSET("ftrace_event_call", "event"), ++ MEMBER_OFFSET("trace_event_call", "event")); + int f2 = MEMBER_OFFSET("trace_event", "type"); + + if (f1 >= 0 && f2 >= 0) +@@ -1495,7 +1569,6 @@ static void ftrace_show(int argc, char * + FILE *file; + size_t ret; + size_t nitems __attribute__ ((__unused__)); +- char *unused __attribute__ ((__unused__)); + + /* check trace-cmd */ + if (env_trace_cmd) +@@ -1519,8 +1592,9 @@ static void ftrace_show(int argc, char * + } + + /* dump trace.dat to the temp file */ +- unused = mktemp(tmp); +- fd = open(tmp, O_WRONLY | O_CREAT | O_TRUNC, 0644); ++ fd = mkstemp(tmp); ++ if (fd < 0) ++ return; + if (trace_cmd_data_output(fd) < 0) + goto out; + diff --git a/SOURCES/ppc64_ring_buffer_read.patch b/SOURCES/ppc64_ring_buffer_read.patch new file mode 100644 index 0000000..84ab1c8 --- /dev/null +++ b/SOURCES/ppc64_ring_buffer_read.patch @@ -0,0 +1,13 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -198,7 +198,9 @@ static int init_offsets(void) + fprintf(fp, "per cpu buffer sizes\n"); + } + +- if (kernel_symbol_exists("ring_buffer_read")) ++ if (machine_type("PPC64") && kernel_symbol_exists(".ring_buffer_read")) ++ gdb_set_crash_scope(symbol_value(".ring_buffer_read"), ".ring_buffer_read"); ++ else if (kernel_symbol_exists("ring_buffer_read")) + gdb_set_crash_scope(symbol_value("ring_buffer_read"), "ring_buffer_read"); + + if (!per_cpu_buffer_sizes) diff --git a/SOURCES/ppc64le.patch b/SOURCES/ppc64le.patch new file mode 100644 index 0000000..b787e66 --- /dev/null +++ b/SOURCES/ppc64le.patch @@ -0,0 +1,13 @@ +--- crash-trace-command-2.0/Makefile.orig ++++ crash-trace-command-2.0/Makefile +@@ -6,6 +6,10 @@ ifeq ($(shell arch), ppc64) + TARGET=PPC64 + TARGET_CFLAGS=-m64 + endif ++ifeq ($(shell arch), ppc64le) ++ TARGET=PPC64 ++ TARGET_CFLAGS=-m64 ++endif + ifeq ($(shell arch), ia64) + TARGET=IA64 + TARGET_CFLAGS= diff --git a/SOURCES/replace_obsolete_init_and_fini.patch b/SOURCES/replace_obsolete_init_and_fini.patch new file mode 100644 index 0000000..1357429 --- /dev/null +++ b/SOURCES/replace_obsolete_init_and_fini.patch @@ -0,0 +1,31 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -1536,23 +1535,21 @@ static struct command_table_entry comman + + static int ftrace_initialized; + +-int _init(void) ++void __attribute__((constructor)) ++trace_init(void) + { + if (ftrace_init() < 0) +- return 0; ++ return; + + ftrace_initialized = 1; + register_extension(command_table); +- +- return 1; + } + +-int _fini(void) ++void __attribute__((destructor)) ++trace_fini(void) 
+ { + if (ftrace_initialized) + ftrace_destroy(); +- +- return 1; + } + + #define TRACE_CMD_FILE_VERSION_STRING "6" diff --git a/SOURCES/rhel8_build.patch b/SOURCES/rhel8_build.patch new file mode 100644 index 0000000..da1d8c3 --- /dev/null +++ b/SOURCES/rhel8_build.patch @@ -0,0 +1,9 @@ +diff -up crash-trace-command-2.0/Makefile.orig crash-trace-command-2.0/Makefile +--- crash-trace-command-2.0/Makefile.orig 2018-09-19 15:46:23.812160803 -0400 ++++ crash-trace-command-2.0/Makefile 2018-09-19 15:47:12.489890130 -0400 +@@ -36,4 +36,4 @@ INCDIR=/usr/include/crash + all: trace.so + + trace.so: $(INCDIR)/defs.h trace.c +- gcc $(RPM_OPT_FLAGS) -Wall -I$(INCDIR) -nostartfiles -shared -rdynamic -o trace.so trace.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) ++ gcc $(RPM_OPT_FLAGS) -Wall -I$(INCDIR) -nostartfiles -shared -rdynamic -o trace.so trace.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) -Wl,-z,now diff --git a/SOURCES/sigsegv_on_calloc_failure.patch b/SOURCES/sigsegv_on_calloc_failure.patch new file mode 100644 index 0000000..580adac --- /dev/null +++ b/SOURCES/sigsegv_on_calloc_failure.patch @@ -0,0 +1,10 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -251,7 +251,6 @@ static int ftrace_init_pages(struct ring + + cpu_buffer->linear_pages = calloc(sizeof(ulong), nr_pages + 1); + if (cpu_buffer->linear_pages == NULL) { +- free(cpu_buffer->pages); + return -1; + } + diff --git a/SOURCES/trace_compiler_warnings.patch b/SOURCES/trace_compiler_warnings.patch new file mode 100644 index 0000000..904f4d0 --- /dev/null +++ b/SOURCES/trace_compiler_warnings.patch @@ -0,0 +1,46 @@ +--- crash-trace-command-2.0/trace.c.orig ++++ crash-trace-command-2.0/trace.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + static int verbose = 0; + +@@ -892,7 +893,7 @@ out_fail: + + static int ftrace_init_event_type(ulong call, struct event_type *aevent_type) + { +- ulong fields_head; ++ ulong fields_head = 0; + + if (ftrace_get_event_type_fields(call, &fields_head) < 0) + return -1; +@@ -1443,6 +1444,8 @@ static void ftrace_show(int argc, char * + int fd; + FILE *file; + size_t ret; ++ size_t nitems __attribute__ ((__unused__)); ++ char *unused __attribute__ ((__unused__)); + + /* check trace-cmd */ + if (env_trace_cmd) +@@ -1465,7 +1468,7 @@ static void ftrace_show(int argc, char * + } + + /* dump trace.dat to the temp file */ +- mktemp(tmp); ++ unused = mktemp(tmp); + fd = open(tmp, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if (trace_cmd_data_output(fd) < 0) + goto out; +@@ -1478,7 +1481,7 @@ static void ftrace_show(int argc, char * + ret = fread(buf, 1, sizeof(buf), file); + if (ret == 0) + break; +- fwrite(buf, 1, ret, fp); ++ nitems = fwrite(buf, 1, ret, fp); + } + pclose(file); + out: diff --git a/SPECS/crash-trace-command.spec b/SPECS/crash-trace-command.spec new file mode 100644 index 0000000..2ab3f85 --- /dev/null +++ b/SPECS/crash-trace-command.spec @@ -0,0 +1,163 @@ +# +# crash core analysis suite +# +Summary: Trace extension module for the crash utility +Name: crash-trace-command +Version: 2.0 +Release: 15%{?dist} +License: GPLv2 +Group: Development/Debuggers +Source: %{name}-%{version}.tar.gz +URL: http://people.redhat.com/anderson/extensions/%{name}-%{version}.tar.gz +# Vendor: Fujitsu Limited +# Packager: Qiao Nuohan +ExclusiveOS: Linux +ExclusiveArch: x86_64 %{ix86} ppc64 ia64 s390 s390x aarch64 %{arm} ppc64le %{mips} +Buildroot: %{_tmppath}/%{name}-root +BuildRequires: zlib-devel lzo-devel snappy-devel +BuildRequires: crash-devel >= 7.2.0-2 +Requires: trace-cmd 
+Requires: crash >= 7.2.0-2 +Patch0: trace_compiler_warnings.patch +Patch1: replace_obsolete_init_and_fini.patch +Patch2: sigsegv_on_calloc_failure.patch +Patch3: initialize_trace_dat.patch +Patch4: ARM64.patch +Patch5: linux_3.10_support.patch +Patch6: ppc64le.patch +Patch7: linux_4.2_support.patch +Patch8: TRACE_EVENT_FL_TRACEPOINT_flag.patch +Patch9: big_endian_nr_pages.patch +Patch10: ppc64_ring_buffer_read.patch +Patch11: rhel8_build.patch + +Patch1001: ARM32.patch +Patch1002: MIPS.patch + +%description +Command for reading ftrace data from a dumpfile. + +%prep +%setup -q -n %{name}-%{version} +%patch0 -p1 -b trace_compiler_warnings.patch +%patch1 -p1 -b replace_obsolete_init_and_fini.patch +%patch2 -p1 -b sigsegv_on_calloc_failure.patch +%patch3 -p1 -b initialize_trace_dat.patch +%patch4 -p1 -b ARM64.patch +%patch5 -p1 -b linux_3.10_support.patch +%patch6 -p1 -b ppc64le.patch +%patch7 -p1 -b linux_4.2_support.patch +%patch8 -p1 -b TRACE_EVENT_FL_TRACEPOINT_flag.patch +%patch9 -p1 -b big_endian_nr_pages.patch +%patch10 -p1 -b ppc64_ring_buffer_read.patch +%patch11 -p1 -b rhel8_build.patch + +%patch1001 -p1 -b ARM32.patch +%patch1002 -p1 -b MIPS.patch + +%build +make + +%install +mkdir -p %{buildroot}%{_libdir}/crash/extensions/ +cp %{_builddir}/%{name}-%{version}/trace.so %{buildroot}%{_libdir}/crash/extensions/ + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root) +%{_libdir}/crash/extensions/trace.so +%doc COPYING + +%changelog +* Mon Feb 10 2020 Nan xiongchao - 2.0-15 +- add mips64 support + +* Mon May 20 2019 Pablo Greco - 2.0-15 +- Fix for armhfp + +* Wed Sep 19 2018 Dave Anderson - 2.0-15 +- annocheck: link with -Wl,-z,now + Resolves: rhbz#1630558 + +* Mon Aug 13 2018 Dave Anderson - 2.0-14 +- Bump release for mass rebuild + Resolves: rhbz#1615511 + +* Wed Dec 6 2017 Dave Anderson - 2.0.13 +- Build requires crash-devel-7.2.0-2 and usage requires crash-7.2.0-2 + because of load_module structure change. + Resolves: rhbz#1520825 + +* Sun Apr 16 2017 Dave Anderson - 2.0.12 +- Differentiate ppc64 .ring_buffer_read text symbol from ring_buffer_read data symbol +- Fix for ring_buffer_per_cpu.nr_pages size change on big-endian systems +- Fix for Linux 4.7 change to the TRACE_EVENT_FL_TRACEPOINT flag + Resolves: rhbz#1441914 + Resolves: rhbz#1440726 + +* Thu Feb 25 2016 Dave Anderson - 2.0-10 +- Fix for ftrace symbol name changes in Linux 4.2 + Resolves: rhbz#1265553 + +* Tue Sep 02 2014 Dave Anderson - 2.0-9 +- Add ppc64le support. + Resolves: rhbz#1123995 + +* Fri Jan 24 2014 Daniel Mach - 2.0-8 +- Mass rebuild 2014-01-24 + +* Fri Dec 27 2013 Daniel Mach - 2.0-7 +- Mass rebuild 2013-12-27 + +* Thu Dec 5 2013 Dave Anderson - 2.0-6 +- Add Linux 3.10 support. + Resolves: rhbz#863833 + +* Tue Nov 12 2013 Dave Anderson - 2.0-5 +- Add ARM64 support. + Resolves: rhbz#1028580 + +* Tue Aug 20 2013 Dave Anderson - 2.0-4 +- crash utility has added LZO and snappy compression in addition to zlib + +* Wed May 29 2013 Dave Anderson - 2.0-3 +- Replace obsolete _init() and _fini() functions. +- Fix possible segmentation violation on calloc() failure. +- Initialize trace_dat to avoid compiler warning. + +* Mon Nov 26 2012 Dave Anderson - 2.0-2 +- trace-cmd package required +- rpmlint cleanups to this file +- fix compiler warnings for trace.c + +* Wed Nov 21 2012 Qiao Nuohan - 2.0-1 +- update code + Resolves: rhbz#863833 + +* Wed Feb 8 2012 Dave Anderson - 1.0-4 +- Build with RPM_OPT_FLAGS. 
+  Resolves: rhbz#729018
+
+* Wed Jun 9 2010 Dave Anderson - 1.0-3
+- Remove trace_dump.patch, which requires a kernel later than
+  the RHEL6 base of 2.6.32.
+  Resolves: rhbz#601536
+
+* Mon May 24 2010 Dave Anderson - 1.0-2
+- Fix for segmentation violation with "trace show -c cpu" command,
+  and add "trace dump -t" command.
+  Resolves: rhbz#592887
+
+* Wed Dec 09 2009 Dave Anderson - 1.0-1.2
+- fix Makefile to account for s390 build
+- change exclusive arch entry from i386 to {ix86}
+- Resolves: rhbz#545564
+
+* Tue Dec 08 2009 Dennis Gregorovic - 1.0-1.1
+- Rebuilt for RHEL 6
+
+* Fri Sep 25 2009 Dave Anderson
+- Initial crash-trace-command package
+
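For reference, a minimal sketch of how the pieces above fit together on mips64. The arch block and the trace.so rule are taken from MIPS.patch and rhel8_build.patch; the expanded gcc line is an assumption based on TARGET=MIPS with an empty TARGET_CFLAGS and INCDIR=/usr/include/crash, not output captured from a real build:

    # Arch selection added by MIPS.patch:
    ifeq ($(shell arch), mips64)
      TARGET=MIPS
      TARGET_CFLAGS=
    endif

    # trace.so rule after rhel8_build.patch, with $(TARGET) expanded for mips64:
    trace.so: /usr/include/crash/defs.h trace.c
            gcc $(RPM_OPT_FLAGS) -Wall -I/usr/include/crash -nostartfiles -shared \
                -rdynamic -o trace.so trace.c -fPIC -DMIPS -Wl,-z,now

The spec installs the resulting module as %{_libdir}/crash/extensions/trace.so (typically /usr/lib64/crash/extensions/trace.so on mips64), where the crash utility can load it with its extend command; the extension then registers the trace command ("trace show", "trace dump -t trace.dat") implemented in trace.c.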