--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -38,6 +38,10 @@ static int trace_buffer_available;
  * max_buffer is supported
  */
 static int max_buffer_available;
+/*
+ * multiple trace instances are supported
+ */
+static int multiple_instances_available;
 
 #define koffset(struct, member) struct##_##member##_offset
 
@@ -78,6 +82,8 @@ static int koffset(ftrace_event_field, o
 static int koffset(ftrace_event_field, size);
 static int koffset(ftrace_event_field, is_signed);
 
+static int koffset(trace_array, name);
+
 static int koffset(POINTER_SYM, POINTER) = 0;
 
 struct ring_buffer_per_cpu {
@@ -101,16 +107,25 @@ struct ring_buffer_per_cpu {
 };
 
 static ulong global_trace;
-static ulong global_trace_buffer;
-static ulong global_max_buffer;
-static ulong global_ring_buffer;
-static unsigned global_pages;
-static struct ring_buffer_per_cpu *global_buffers;
-
 static ulong max_tr_trace;
-static ulong max_tr_ring_buffer;
-static unsigned max_tr_pages;
-static struct ring_buffer_per_cpu *max_tr_buffers;
+
+struct trace_instance {
+	char name[NAME_MAX + 1];
+	ulong trace_buffer;
+	ulong max_buffer;
+	ulong ring_buffer;
+	unsigned pages;
+	struct ring_buffer_per_cpu *buffers;
+
+	ulong max_tr_ring_buffer;
+	unsigned max_tr_pages;
+	struct ring_buffer_per_cpu *max_tr_buffers;
+};
+
+static ulong ftrace_trace_arrays;
+static struct trace_instance global_trace_instance;
+static struct trace_instance *trace_instances = NULL;
+static int instance_count;
 
 static ulong ftrace_events;
 static ulong current_trace;
@@ -229,6 +244,9 @@ static int init_offsets(void)
 	init_offset(ftrace_event_field, size);
 	init_offset(ftrace_event_field, is_signed);
 
+	if (MEMBER_EXISTS("trace_array", "name"))
+		init_offset(trace_array, name);
+
 	return 0;
 #undef init_offset
 }
@@ -435,61 +453,140 @@ out_fail:
 	return -1;
 }
 
-static int ftrace_int_global_trace(void)
+static int ftrace_init_trace(struct trace_instance *ti, ulong instance_addr)
 {
 	if (trace_buffer_available) {
-		global_trace_buffer = global_trace + koffset(trace_array, trace_buffer);
-		read_value(global_ring_buffer, global_trace_buffer, trace_buffer, buffer);
+		ti->trace_buffer = instance_addr +
+				koffset(trace_array, trace_buffer);
+		read_value(ti->ring_buffer, ti->trace_buffer,
+				trace_buffer, buffer);
+
+		if (max_buffer_available) {
+			ti->max_buffer = instance_addr +
+					koffset(trace_array, max_buffer);
+			read_value(ti->max_tr_ring_buffer, ti->max_buffer,
+					trace_buffer, buffer);
+		}
 	} else {
-		read_value(global_ring_buffer, global_trace, trace_array, buffer);
-		read_value(global_pages, global_ring_buffer, ring_buffer, pages);
+		read_value(ti->ring_buffer, instance_addr, trace_array, buffer);
+		read_value(ti->pages, ti->ring_buffer, ring_buffer, pages);
+
+		read_value(ti->max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
+		if (ti->max_tr_ring_buffer)
+			read_value(ti->max_tr_pages, ti->max_tr_ring_buffer, ring_buffer, pages);
 	}
 
-	global_buffers = calloc(sizeof(*global_buffers), nr_cpu_ids);
-	if (global_buffers == NULL)
+	ti->buffers = calloc(sizeof(*ti->buffers), nr_cpu_ids);
+	if (ti->buffers == NULL)
+		goto out_fail;
+
+	if (ftrace_init_buffers(ti->buffers, ti->ring_buffer,
+			ti->pages) < 0)
+		goto out_fail;
+
+	if (!ti->max_tr_ring_buffer)
+		return 0;
+
+	ti->max_tr_buffers = calloc(sizeof(*ti->max_tr_buffers), nr_cpu_ids);
+	if (ti->max_tr_buffers == NULL)
 		goto out_fail;
 
-	if (ftrace_init_buffers(global_buffers, global_ring_buffer,
-			global_pages) < 0)
+	if (ftrace_init_buffers(ti->max_tr_buffers, ti->max_tr_ring_buffer,
+			ti->max_tr_pages) < 0)
 		goto out_fail;
 
 	return 0;
 
 out_fail:
-	free(global_buffers);
+	free(ti->max_tr_buffers);
+	free(ti->buffers);
 	return -1;
 }
 
-static int ftrace_int_max_tr_trace(void)
+static void ftrace_destroy_all_instance_buffers()
 {
-	if (trace_buffer_available) {
-		if (!max_buffer_available)
-			return 0;
+	int i;
 
-		global_max_buffer = global_trace + koffset(trace_array, max_buffer);
-		read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer);
-	} else {
-		read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
+	for (i = 0; i < instance_count; i++)
+	{
+		struct trace_instance *ti = &trace_instances[i];
 
-		if (!max_tr_ring_buffer)
-			return 0;
+		if (ti->max_tr_ring_buffer) {
+			ftrace_destroy_buffers(ti->max_tr_buffers);
+			free(ti->max_tr_buffers);
+		}
 
-		read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages);
+		ftrace_destroy_buffers(ti->buffers);
+		free(ti->buffers);
 	}
+}
 
-	max_tr_buffers = calloc(sizeof(*max_tr_buffers), nr_cpu_ids);
-	if (max_tr_buffers == NULL)
-		goto out_fail;
+static void ftrace_destroy_instances()
+{
+	ftrace_destroy_all_instance_buffers();
+	free(trace_instances);
+}
 
-	if (ftrace_init_buffers(max_tr_buffers, max_tr_ring_buffer,
-			max_tr_pages) < 0)
-		goto out_fail;
+static int ftrace_init_instances()
+{
+	int i;
+	struct trace_instance *ti;
+	struct list_data list_data;
+	struct list_data *ld = &list_data;
+
+	if (!multiple_instances_available)
+		return 0;
+
+	BZERO(ld, sizeof(struct list_data));
+	ld->start = ftrace_trace_arrays;
+	ld->end = global_trace;
+	ld->flags = LIST_ALLOCATE;
+	instance_count = do_list(ld);
+
+	/* The do_list count includes the list_head, which is not a
+	 * proper instance */
+	instance_count--;
+	if (instance_count <= 0)
+		return 0;
+
+	trace_instances = calloc(sizeof(struct trace_instance), instance_count);
+
+	/* We start i at 1 to skip over the list_head and continue to the last
+	 * instance, which lies at index instance_count */
+	for (i = 1; i <= instance_count; i++)
+	{
+		ulong instance_ptr;
+		ulong name_addr;
+		int ret;
+
+		ti = &trace_instances[i-1];
+		instance_ptr = ld->list_ptr[i];
+		read_value(name_addr, instance_ptr, trace_array, name);
+		if (!name_addr)
+		{
+			console("Instance name is NULL\n");
+		}
+		else if (!read_string(name_addr, ti->name, sizeof(ti->name)))
+		{
+			console("Failed to read instance name at address %p\n", (void*)name_addr);
+			goto out_fail;
+		}
+
+		ret = ftrace_init_trace(ti, instance_ptr);
+		if (ret < 0)
+			goto out_fail;
+	}
+	FREEBUF(ld->list_ptr);
 
 	return 0;
 
 out_fail:
-	free(max_tr_buffers);
-	max_tr_ring_buffer = 0;
+	/* We've already freed the current instance's trace buffer info, so
+	 * we'll clear that out to avoid double freeing in
+	 * ftrace_destroy_instances() */
+	BZERO(ti, sizeof(struct trace_instance));
+	ftrace_destroy_instances();
+
 	return -1;
 }
 
@@ -504,7 +601,7 @@ static int ftrace_init_current_tracer(vo
 	} else {
 		read_value(addr, current_trace, POINTER_SYM, POINTER);
 	}
-	
+
 	read_value(addr, addr, tracer, name);
 	read_string(addr, tmp, 128);
 
@@ -524,9 +621,11 @@ static int ftrace_init(void)
 	struct syment *sym_max_tr_trace;
 	struct syment *sym_ftrace_events;
 	struct syment *sym_current_trace;
+	struct syment *sym_ftrace_trace_arrays;
 
 	sym_global_trace = symbol_search("global_trace");
 	sym_ftrace_events = symbol_search("ftrace_events");
+	sym_ftrace_trace_arrays = symbol_search("ftrace_trace_arrays");
 
 	if (sym_global_trace == NULL || sym_ftrace_events == NULL)
 		return -1;
@@ -534,6 +633,13 @@ static int ftrace_init(void)
 	global_trace = sym_global_trace->value;
 	ftrace_events = sym_ftrace_events->value;
 
+	if (sym_ftrace_trace_arrays)
+	{
+		multiple_instances_available = 1;
+		ftrace_trace_arrays = sym_ftrace_trace_arrays->value;
+	}
+
+
 	if (MEMBER_EXISTS("trace_array", "current_trace")) {
 		encapsulated_current_trace = 1;
 	} else {
@@ -564,28 +670,31 @@ static int ftrace_init(void)
 		return -1;
 	print_offsets();
 
-	if (ftrace_int_global_trace() < 0)
+	if (ftrace_init_trace(&global_trace_instance, global_trace) < 0)
 		goto out_0;
 
-	ftrace_int_max_tr_trace();
+	if (ftrace_init_instances() < 0)
+		goto out_1;
 
 	if (ftrace_init_event_types() < 0)
-		goto out_1;
+		goto out_2;
 
 	if (ftrace_init_current_tracer() < 0)
-		goto out_2;
+		goto out_3;
 
 	return 0;
 
-out_2:
+out_3:
 	ftrace_destroy_event_types();
+out_2:
+	ftrace_destroy_instances();
out_1:
-	if (max_tr_ring_buffer) {
-		ftrace_destroy_buffers(max_tr_buffers);
-		free(max_tr_buffers);
+	if (global_trace_instance.max_tr_ring_buffer) {
+		ftrace_destroy_buffers(global_trace_instance.max_tr_buffers);
+		free(global_trace_instance.max_tr_buffers);
 	}
-	ftrace_destroy_buffers(global_buffers);
-	free(global_buffers);
+	ftrace_destroy_buffers(global_trace_instance.buffers);
+	free(global_trace_instance.buffers);
 out_0:
 	return -1;
 }
@@ -595,13 +704,15 @@ static void ftrace_destroy(void)
 	free(current_tracer_name);
 	ftrace_destroy_event_types();
 
-	if (max_tr_ring_buffer) {
-		ftrace_destroy_buffers(max_tr_buffers);
-		free(max_tr_buffers);
+	ftrace_destroy_instances();
+
+	if (global_trace_instance.max_tr_ring_buffer) {
+		ftrace_destroy_buffers(global_trace_instance.max_tr_buffers);
+		free(global_trace_instance.max_tr_buffers);
 	}
 
-	ftrace_destroy_buffers(global_buffers);
-	free(global_buffers);
+	ftrace_destroy_buffers(global_trace_instance.buffers);
+	free(global_trace_instance.buffers);
 }
 
 static int ftrace_dump_page(int fd, ulong page, void *page_tmp)
@@ -652,7 +763,8 @@ static int try_mkdir(const char *pathnam
 	return 0;
 }
 
-static int ftrace_dump_buffers(const char *per_cpu_path)
+static int ftrace_dump_buffers(const char *per_cpu_path,
+                                struct trace_instance *ti)
 {
 	int i;
 	void *page_tmp;
@@ -664,7 +776,7 @@ static int ftrace_dump_buffers(const cha
 		return -1;
 
 	for (i = 0; i < nr_cpu_ids; i++) {
-		struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+		struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
 
 		if (!cpu_buffer->kaddr)
 			continue;
@@ -679,7 +791,7 @@ static int ftrace_dump_buffers(const cha
 		if (fd < 0)
 			goto out_fail;
 
-		ftrace_dump_buffer(fd, cpu_buffer, global_pages, page_tmp);
+		ftrace_dump_buffer(fd, cpu_buffer, ti->pages, page_tmp);
 		close(fd);
 	}
 
@@ -1015,8 +1127,6 @@ static void ftrace_destroy_event_types(v
 	free(ftrace_common_fields);
 }
 
-#define TRACE_EVENT_FL_TRACEPOINT 0x40
-
 static
 int ftrace_get_event_type_name(ulong call, char *name, int len)
 {
@@ -1024,34 +1134,35 @@ int ftrace_get_event_type_name(ulong cal
 	static int name_offset;
 	static int flags_offset;
 	static int tp_name_offset;
-	uint flags;
+	static long tracepoint_flag;
 
+	uint flags;
 	ulong name_addr;
 
 	if (inited)
 		goto work;
 
-	inited = 1;
-	name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"),
-		MEMBER_OFFSET("trace_event_call", "name"));
-	if (name_offset >= 0)
-		goto work;
-
-	name_offset = MAX(ANON_MEMBER_OFFSET("ftrace_event_call", "name"),
-		ANON_MEMBER_OFFSET("trace_event_call", "name"));
-	if (name_offset < 0)
-		return -1;
+	name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "tp"),
+		MEMBER_OFFSET("trace_event_call", "tp"));
+	if (name_offset >= 0) {
+		flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"),
+			MEMBER_OFFSET("trace_event_call", "flags"));
+		if (flags_offset < 0)
+			return -1;
 
-	flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"),
-		MEMBER_OFFSET("trace_event_call", "flags"));
-	if (flags_offset < 0)
-		return -1;
+		tp_name_offset = MEMBER_OFFSET("tracepoint", "name");
+		if (tp_name_offset < 0)
+			return -1;
 
-	tp_name_offset = MEMBER_OFFSET("tracepoint", "name");
-	if (tp_name_offset < 0)
-		return -1;
+		if (!enumerator_value("TRACE_EVENT_FL_TRACEPOINT", &tracepoint_flag))
+			return -1;
 
-	inited = 2;
+		inited = 2;
+	} else {
+		name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"),
+			MEMBER_OFFSET("trace_event_call", "name"));
+		inited = 1;
+	}
 
 work:
 	if (name_offset < 0)
@@ -1067,7 +1178,7 @@ work:
 		     RETURN_ON_ERROR))
 			return -1;
 
-		if (flags & TRACE_EVENT_FL_TRACEPOINT) {
+		if (flags & (uint)tracepoint_flag) {
 			if (!readmem(name_addr + tp_name_offset, KVADDR,
 				     &name_addr, sizeof(name_addr),
 				     "read tracepoint name", RETURN_ON_ERROR))
@@ -1476,26 +1587,72 @@ static int dump_kallsyms(const char *dum
 
 static int trace_cmd_data_output(int fd);
 
+#define	FTRACE_DUMP_SYMBOLS	(1 << 0)
+#define	FTRACE_DUMP_META_DATA	(1 << 1)
+
+static int populate_ftrace_dir_tree(struct trace_instance *ti,
+		char *root, uint flags)
+{
+	char path[PATH_MAX];
+	int ret;
+
+	ret = mkdir(root, 0755);
+	if (ret < 0) {
+		if (errno == EEXIST)
+			error(INFO, "mkdir: %s exists\n", root);
+		return FALSE;
+	}
+
+	snprintf(path, sizeof(path), "%s/per_cpu", root);
+	if (try_mkdir(path, 0755) < 0)
+		return FALSE;
+
+	if (ftrace_dump_buffers(path, ti) < 0)
+		return FALSE;
+
+	if (flags & FTRACE_DUMP_META_DATA) {
+		/* Dump event types */
+		snprintf(path, sizeof(path), "%s/events", root);
+		if (try_mkdir(path, 0755) < 0)
+			return FALSE;
+
+		if (ftrace_dump_event_types(path) < 0)
+			return FALSE;
+
+		/* Dump pids with corresponding cmdlines */
+		if (dump_saved_cmdlines(root) < 0)
+			return FALSE;
+	}
+
+	if (flags & FTRACE_DUMP_SYMBOLS) {
+		/* Dump all symbols of the kernel */
+		dump_kallsyms(root);
+	}
+
+	return TRUE;
+}
+
 static void ftrace_dump(int argc, char *argv[])
 {
 	int c;
-	int dump_meta_data = 0;
-	int dump_symbols = 0;
+	int i;
+	uint flags = 0;
 	char *dump_tracing_dir;
-	char path[PATH_MAX];
-	int ret;
+	char instance_path[PATH_MAX];
 
        while ((c = getopt(argc, argv, "smt")) != EOF) {
                switch(c)
 		{
 		case 's':
-			dump_symbols = 1;
+			flags |= FTRACE_DUMP_SYMBOLS;
 			break;
 		case 'm':
-			dump_meta_data = 1;
+			flags |= FTRACE_DUMP_META_DATA;
 			break;
 		case 't':
-			if (dump_symbols || dump_meta_data || argc - optind > 1)
+			if (flags & FTRACE_DUMP_SYMBOLS ||
+				flags & FTRACE_DUMP_META_DATA ||
+				argc - optind > 1)
 				cmd_usage(pc->curcmd, SYNOPSIS);
 			else {
 				char *trace_dat = "trace.dat";
@@ -1526,38 +1683,34 @@ static void ftrace_dump(int argc, char *
 		return;
 	}
 
-	ret = mkdir(dump_tracing_dir, 0755);
-	if (ret < 0) {
-		if (errno == EEXIST)
-			error(INFO, "mkdir: %s exists\n", dump_tracing_dir);
+	if (!populate_ftrace_dir_tree(&global_trace_instance, dump_tracing_dir, flags))
 		return;
-	}
 
-	snprintf(path, sizeof(path), "%s/per_cpu", dump_tracing_dir);
-	if (try_mkdir(path, 0755) < 0)
+	if (!multiple_instances_available || instance_count == 0)
 		return;
 
-	if (ftrace_dump_buffers(path) < 0)
+	/* Create an instances directory, and dump instance data in there */
+	snprintf(instance_path, sizeof(instance_path),
+			"%s/instances", dump_tracing_dir);
+	if (try_mkdir(instance_path, 0755) < 0)
 		return;
 
-	if (dump_meta_data) {
-		/* Dump event types */
-		snprintf(path, sizeof(path), "%s/events", dump_tracing_dir);
-		if (try_mkdir(path, 0755) < 0)
-			return;
+	/* Don't care about the flags anymore */
+	flags = 0;
 
-		if (ftrace_dump_event_types(path) < 0)
-			return;
+	for (i = 0; i < instance_count; i++)
+	{
+		struct trace_instance *ti = &trace_instances[i];
+
+		snprintf(instance_path, sizeof(instance_path),
+			"%s/instances/%s", dump_tracing_dir,
+			ti->name);
 
-		/* Dump pids with corresponding cmdlines */
-		if (dump_saved_cmdlines(dump_tracing_dir) < 0)
-			return;
+		if (populate_ftrace_dir_tree(ti, instance_path, flags) < 0)
+			break;
 	}
 
-	if (dump_symbols) {
-		/* Dump all symbols of the kernel */
-		dump_kallsyms(dump_tracing_dir);
-	}
+	return;
 }
 
 static void ftrace_show(int argc, char *argv[])
@@ -2161,26 +2314,69 @@ static int save_ftrace_cmdlines(int fd)
 	return tmp_file_flush(fd);
 }
 
-static int save_res_data(int fd, int nr_cpu_buffers)
+/* From trace-cmd.h */
+enum {
+	TRACECMD_OPTION_DONE,         /* 0 */
+	TRACECMD_OPTION_DATE,         /* 1 */
+	TRACECMD_OPTION_CPUSTAT,      /* 2 */
+	TRACECMD_OPTION_BUFFER,       /* 3 */
+	TRACECMD_OPTION_TRACECLOCK,   /* 4 */
+	TRACECMD_OPTION_UNAME,        /* 5 */
+	TRACECMD_OPTION_HOOK,         /* 6 */
+};
+
+static int write_options(int fd, unsigned long long *buffer_offsets)
 {
-	unsigned short option = 0;
+	int i;
+	unsigned short option;
 
-	if (write_and_check(fd, &nr_cpu_buffers, 4))
-		return -1;
+	if (!multiple_instances_available)
+		return 0;
 
 	if (write_and_check(fd, "options  ", 10))
 		return -1;
 
+	option = TRACECMD_OPTION_BUFFER;
+	for (i = 0; i < instance_count; i++)
+	{
+		char *name = trace_instances[i].name;
+		size_t name_size = strlen(name) + 1; /* Name length + '\0' */
+		unsigned long long option_size = 8 + name_size;
+		unsigned long long offset;
+
+		offset = buffer_offsets ? buffer_offsets[i] : 0;
+		if (write_and_check(fd, &option, 2))
+			return -1;
+		if (write_and_check(fd, &option_size, 4))
+			return -1;
+		if (write_and_check(fd, &offset, 8))
+			return -1;
+		if (write_and_check(fd, name, name_size))
+			return -1;
+	}
+
+	option = TRACECMD_OPTION_DONE;
 	if (write_and_check(fd, &option, 2))
 		return -1;
 
+	return 0;
+}
+
+static int save_res_data(int fd, int nr_cpu_buffers, unsigned long long *buffer_offsets)
+{
+	if (write_and_check(fd, &nr_cpu_buffers, 4))
+		return -1;
+
+	if (write_options(fd, buffer_offsets))
+		return -1;
+
 	if (write_and_check(fd, "flyrecord", 10))
 		return -1;
 
 	return 0;
 }
 
-static int save_record_data(int fd, int nr_cpu_buffers)
+static int save_record_data(int fd, int nr_cpu_buffers, struct trace_instance *ti)
 {
 	int i, j;
 	unsigned long long offset, buffer_offset;
@@ -2192,7 +2388,7 @@ static int save_record_data(int fd, int
 	buffer_offset = offset;
 
 	for (i = 0; i < nr_cpu_ids; i++) {
-		struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+		struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
 		unsigned long long buffer_size;
 
 		if (!cpu_buffer->kaddr)
@@ -2212,7 +2408,7 @@ static int save_record_data(int fd, int
 
 	lseek(fd, offset, SEEK_SET);
 	for (i = 0; i < nr_cpu_ids; i++) {
-		struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+		struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
 
 		if (!cpu_buffer->kaddr)
 			continue;
@@ -2231,13 +2427,13 @@ static int save_record_data(int fd, int
 	return 0;
 }
 
-static int __trace_cmd_data_output(int fd)
+static int get_nr_cpu_buffers(struct trace_instance *ti)
 {
 	int i;
 	int nr_cpu_buffers = 0;
 
 	for (i = 0; i < nr_cpu_ids; i++) {
-		struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+		struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
 
 		if (!cpu_buffer->kaddr)
 			continue;
@@ -2245,6 +2441,19 @@ static int __trace_cmd_data_output(int f
 		nr_cpu_buffers++;
 	}
 
+	return nr_cpu_buffers;
+}
+
+static int __trace_cmd_data_output(int fd)
+{
+	int nr_cpu_buffers;
+	unsigned long long global_res_data_offset;
+	unsigned long long *instance_offsets;
+
+	instance_offsets = calloc(sizeof(unsigned long long), instance_count);
+
+	nr_cpu_buffers = get_nr_cpu_buffers(&global_trace_instance);
+
 	if (save_initial_data(fd))
 		return -1;
 	if (save_header_files(fd))
@@ -2257,9 +2466,38 @@ static int __trace_cmd_data_output(int f
 		return -1;
 	if (save_ftrace_cmdlines(fd))
 		return -1;
-	if (save_res_data(fd, nr_cpu_buffers))
+
+	/* We don't have the instance buffer offsets yet, so we'll write in 0s
+	 * for now, and fix it up after we have that information available */
+	global_res_data_offset = lseek(fd, 0, SEEK_CUR);
+	if (save_res_data(fd, nr_cpu_buffers, NULL))
 		return -1;
-	if (save_record_data(fd, nr_cpu_buffers))
+	if (save_record_data(fd, nr_cpu_buffers, &global_trace_instance))
+		return -1;
+
+	if (multiple_instances_available)
+	{
+		int i;
+
+		for (i = 0; i < instance_count; i++)
+		{
+			struct trace_instance *ti = &trace_instances[i];
+			nr_cpu_buffers = get_nr_cpu_buffers(ti);
+
+			/* Save off the instance offset for fixup later */
+			instance_offsets[i] = lseek(fd, 0, SEEK_CUR);
+
+			if (write_and_check(fd, "flyrecord", 10))
+				return -1;
+			if (save_record_data(fd, nr_cpu_buffers, ti))
+				return -1;
+		}
+	}
+
+	/* Fix up the global trace's options header with the instance offsets */
+	lseek(fd, global_res_data_offset, SEEK_SET);
+	nr_cpu_buffers = get_nr_cpu_buffers(&global_trace_instance);
+	if (save_res_data(fd, nr_cpu_buffers, instance_offsets))
 		return -1;
 
 	return 0;